From ea84da913f8cc183362587fde320abfd32c0fc0d Mon Sep 17 00:00:00 2001
From: Apple
Date: Wed, 29 Mar 2017 20:13:42 +0000
Subject: [PATCH] libplatform-126.50.8.tar.gz

---
 include/os/internal/atomic.h | 269 +++++++----------------------------
 include/os/lock_private.h    |   2 +-
 src/os/lock.c                |  33 ++++-
 3 files changed, 83 insertions(+), 221 deletions(-)

diff --git a/include/os/internal/atomic.h b/include/os/internal/atomic.h
index f2af82b..9b3294d 100644
--- a/include/os/internal/atomic.h
+++ b/include/os/internal/atomic.h
@@ -46,8 +46,7 @@
 #pragma mark -
 #pragma mark memory_order
 
-typedef enum _os_atomic_memory_order
-{
+typedef enum _os_atomic_memory_order {
 	_os_atomic_memory_order_relaxed,
 	_os_atomic_memory_order_consume,
 	_os_atomic_memory_order_acquire,
@@ -55,139 +54,82 @@ typedef enum _os_atomic_memory_order
 	_os_atomic_memory_order_acq_rel,
 	_os_atomic_memory_order_seq_cst,
 	_os_atomic_memory_order_ordered,
+	_os_atomic_memory_order_dependency,
 } _os_atomic_memory_order;
 
 #if !OS_ATOMIC_UP
-#define os_atomic_memory_order_relaxed \
-		_os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_acquire \
-		_os_atomic_memory_order_acquire
-#define os_atomic_memory_order_release \
-		_os_atomic_memory_order_release
-#define os_atomic_memory_order_acq_rel \
-		_os_atomic_memory_order_acq_rel
-#define os_atomic_memory_order_seq_cst \
-		_os_atomic_memory_order_seq_cst
-#define os_atomic_memory_order_ordered \
-		_os_atomic_memory_order_seq_cst
+#define os_atomic_memory_order_relaxed _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_acquire _os_atomic_memory_order_acquire
+#define os_atomic_memory_order_release _os_atomic_memory_order_release
+#define os_atomic_memory_order_acq_rel _os_atomic_memory_order_acq_rel
+#define os_atomic_memory_order_seq_cst _os_atomic_memory_order_seq_cst
+#define os_atomic_memory_order_ordered _os_atomic_memory_order_seq_cst
+#define os_atomic_memory_order_dependency _os_atomic_memory_order_acquire
 #else // OS_ATOMIC_UP
-#define os_atomic_memory_order_relaxed \
-		_os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_acquire \
-		_os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_release \
-		_os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_acq_rel \
-		_os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_seq_cst \
-		_os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_ordered \
-		_os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_relaxed _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_acquire _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_release _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_acq_rel _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_seq_cst _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_ordered _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_dependency _os_atomic_memory_order_relaxed
 #endif // OS_ATOMIC_UP
 
-#if __has_extension(c_generic_selections)
-#define _os_atomic_basetypeof(p) \
-		typeof(*_Generic((p), \
-		char*: (char*)(p), \
-		volatile char*: (char*)(p), \
-		signed char*: (signed char*)(p), \
-		volatile signed char*: (signed char*)(p), \
-		unsigned char*: (unsigned char*)(p), \
-		volatile unsigned char*: (unsigned char*)(p), \
-		short*: (short*)(p), \
-		volatile short*: (short*)(p), \
-		unsigned short*: (unsigned short*)(p), \
-		volatile unsigned short*: (unsigned short*)(p), \
-		int*: (int*)(p), \
-		volatile int*: (int*)(p), \
-		unsigned int*: (unsigned int*)(p), \
-		volatile unsigned int*: (unsigned int*)(p), \
-		long*: (long*)(p), \
-		volatile long*: (long*)(p), \
-		unsigned long*: (unsigned long*)(p), \
-		volatile unsigned long*: (unsigned long*)(p), \
-		long long*: (long long*)(p), \
-		volatile long long*: (long long*)(p), \
-		unsigned long long*: (unsigned long long*)(p), \
-		volatile unsigned long long*: (unsigned long long*)(p), \
-		const void**: (const void**)(p), \
-		const void*volatile*: (const void**)(p), \
-		default: (void**)(p)))
-#endif
-
-#if __has_extension(c_atomic) && __has_extension(c_generic_selections)
 #pragma mark -
 #pragma mark c11
 
+#if !__has_extension(c_atomic)
+#error "Please use a C11 compiler"
+#endif
+
+#define os_atomic(type) type _Atomic
+
 #define _os_atomic_c11_atomic(p) \
-		_Generic((p), \
-		char*: (_Atomic(char)*)(p), \
-		volatile char*: (volatile _Atomic(char)*)(p), \
-		signed char*: (_Atomic(signed char)*)(p), \
-		volatile signed char*: (volatile _Atomic(signed char)*)(p), \
-		unsigned char*: (_Atomic(unsigned char)*)(p), \
-		volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \
-		short*: (_Atomic(short)*)(p), \
-		volatile short*: (volatile _Atomic(short)*)(p), \
-		unsigned short*: (_Atomic(unsigned short)*)(p), \
-		volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \
-		int*: (_Atomic(int)*)(p), \
-		volatile int*: (volatile _Atomic(int)*)(p), \
-		unsigned int*: (_Atomic(unsigned int)*)(p), \
-		volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \
-		long*: (_Atomic(long)*)(p), \
-		volatile long*: (volatile _Atomic(long)*)(p), \
-		unsigned long*: (_Atomic(unsigned long)*)(p), \
-		volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \
-		long long*: (_Atomic(long long)*)(p), \
-		volatile long long*: (volatile _Atomic(long long)*)(p), \
-		unsigned long long*: (_Atomic(unsigned long long)*)(p), \
-		volatile unsigned long long*: \
-				(volatile _Atomic(unsigned long long)*)(p), \
-		const void**: (_Atomic(const void*)*)(p), \
-		const void*volatile*: (volatile _Atomic(const void*)*)(p), \
-		default: (volatile _Atomic(void*)*)(p))
+		((typeof(*(p)) _Atomic *)(p))
+
+// This removes the _Atomic and volatile qualifiers on the type of *p
+#define _os_atomic_basetypeof(p) \
+		typeof(__c11_atomic_load(_os_atomic_c11_atomic(p), \
+		_os_atomic_memory_order_relaxed))
+
+#define _os_atomic_baseptr(p) \
+		((_os_atomic_basetypeof(p) *)(p))
 
 #define _os_atomic_barrier(m) \
-		({ __c11_atomic_thread_fence(os_atomic_memory_order_##m); })
+		__c11_atomic_thread_fence(os_atomic_memory_order_##m)
 #define os_atomic_load(p, m) \
-		({ _os_atomic_basetypeof(p) _r = \
-		__c11_atomic_load(_os_atomic_c11_atomic(p), \
-		os_atomic_memory_order_##m); (typeof(*(p)))_r; })
+		__c11_atomic_load(_os_atomic_c11_atomic(p), os_atomic_memory_order_##m)
 #define os_atomic_store(p, v, m) \
-		({ _os_atomic_basetypeof(p) _v = (v); \
-		__c11_atomic_store(_os_atomic_c11_atomic(p), _v, \
-		os_atomic_memory_order_##m); })
+		__c11_atomic_store(_os_atomic_c11_atomic(p), v, \
+		os_atomic_memory_order_##m)
 #define os_atomic_xchg(p, v, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = \
-		__c11_atomic_exchange(_os_atomic_c11_atomic(p), _v, \
-		os_atomic_memory_order_##m); (typeof(*(p)))_r; })
+		__c11_atomic_exchange(_os_atomic_c11_atomic(p), v, \
+		os_atomic_memory_order_##m)
 #define os_atomic_cmpxchg(p, e, v, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \
+		({ _os_atomic_basetypeof(p) _r = (e); \
 		__c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
-		&_r, _v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); })
+		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); })
 #define os_atomic_cmpxchgv(p, e, v, g, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
 		__c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
-		&_r, _v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
-		*(g) = (typeof(*(p)))_r; _b; })
+		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
+		*(g) = _r; _b; })
 #define os_atomic_cmpxchgvw(p, e, v, g, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
 		__c11_atomic_compare_exchange_weak(_os_atomic_c11_atomic(p), \
-		&_r, _v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
-		*(g) = (typeof(*(p)))_r; _b; })
+		&_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
+		*(g) = _r; _b; })
 
 #define _os_atomic_c11_op(p, v, m, o, op) \
 		({ _os_atomic_basetypeof(p) _v = (v), _r = \
 		__c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
-		os_atomic_memory_order_##m); (typeof(*(p)))(_r op _v); })
+		os_atomic_memory_order_##m); (typeof(_r))(_r op _v); })
 #define _os_atomic_c11_op_orig(p, v, m, o, op) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = \
-		__c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
-		os_atomic_memory_order_##m); (typeof(*(p)))_r; })
+		__c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), v, \
+		os_atomic_memory_order_##m)
 
 #define os_atomic_add(p, v, m) \
 		_os_atomic_c11_op((p), (v), m, add, +)
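
Two details of the C11 rewrite above are worth spelling out for review. `_os_atomic_c11_atomic` replaces the old `_Generic` table with a single cast, `(typeof(*(p)) _Atomic *)(p)`, and `_os_atomic_basetypeof` recovers the plain value type by taking `typeof` of a relaxed `__c11_atomic_load`, whose result drops the `_Atomic` and `volatile` qualifiers. A minimal sketch of how the pieces compose, assuming Clang in GNU C mode (the `my_*` macros mirror the patch's definitions, and `increment_refcount` with its `counter` parameter is illustrative, not from the patch):

```c
#include <stdint.h>

// Same shape as the macros above, inlined so the sketch stands alone.
#define my_c11_atomic(p) ((typeof(*(p)) _Atomic *)(p))
#define my_basetypeof(p) \
	typeof(__c11_atomic_load(my_c11_atomic(p), __ATOMIC_RELAXED))

static inline uint32_t
increment_refcount(volatile uint32_t *counter)
{
	// typeof(*counter) is "volatile uint32_t", but the __c11_atomic_load
	// inside my_basetypeof yields plain uint32_t, so _r is an ordinary
	// unqualified local.
	my_basetypeof(counter) _r =
		__c11_atomic_fetch_add(my_c11_atomic(counter), 1, __ATOMIC_RELAXED);
	return _r + 1; // like os_atomic_add(): return the post-op value
}
```
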
@@ -210,116 +152,11 @@ typedef enum _os_atomic_memory_order
 #define os_atomic_xor_orig(p, v, m) \
 		_os_atomic_c11_op_orig((p), (v), m, xor, ^)
 
-#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
-#pragma mark -
-#pragma mark gnu99
-
-#define _os_atomic_full_barrier() \
-		__sync_synchronize()
-#define _os_atomic_barrier(m) \
-		({ switch(os_atomic_memory_order_##m) { \
-		case _os_atomic_memory_order_relaxed: \
-			break; \
-		default: \
-			_os_atomic_full_barrier(); break; \
-		} })
-#define os_atomic_load(p, m) \
-		({ typeof(*(p)) _r = *(p); \
-		switch(os_atomic_memory_order_##m) { \
-		case _os_atomic_memory_order_relaxed: \
-		case _os_atomic_memory_order_acquire: \
-		case _os_atomic_memory_order_seq_cst: \
-			_os_atomic_barrier(m); \
-			break; \
-		default: \
-			_os_atomic_unimplemented(); break; \
-		} _r; })
-#define os_atomic_store(p, v, m) \
-		({ switch(os_atomic_memory_order_##m) { \
-		case _os_atomic_memory_order_relaxed: \
-		case _os_atomic_memory_order_release: \
-		case _os_atomic_memory_order_seq_cst: \
-			_os_atomic_barrier(m); \
-			*(p) = (v); break; \
-		default: \
-			_os_atomic_unimplemented(); break; \
-		} switch(os_atomic_memory_order_##m) { \
-		case _os_atomic_memory_order_seq_cst: \
-			_os_atomic_barrier(m); break; \
-		default: \
-			break; \
-		} })
-#if __has_builtin(__sync_swap)
-#define os_atomic_xchg(p, v, m) \
-		((typeof(*(p)))__sync_swap((p), (v)))
-#else
-#define os_atomic_xchg(p, v, m) \
-		((typeof(*(p)))__sync_lock_test_and_set((p), (v)))
-#endif
-#define os_atomic_cmpxchg(p, e, v, m) \
-		__sync_bool_compare_and_swap((p), (e), (v))
-#define os_atomic_cmpxchgv(p, e, v, g, m) \
-		({ typeof(*(g)) _e = (e), _r = \
-		__sync_val_compare_and_swap((p), _e, (v)); \
-		bool _b = (_e == _r); *(g) = _r; _b; })
-#define os_atomic_cmpxchgvw(p, e, v, g, m) \
-		os_atomic_cmpxchgv((p), (e), (v), (g), m)
-
-#define os_atomic_add(p, v, m) \
-		__sync_add_and_fetch((p), (v))
-#define os_atomic_add_orig(p, v, m) \
-		__sync_fetch_and_add((p), (v))
-#define os_atomic_sub(p, v, m) \
-		__sync_sub_and_fetch((p), (v))
-#define os_atomic_sub_orig(p, v, m) \
-		__sync_fetch_and_sub((p), (v))
-#define os_atomic_and(p, v, m) \
-		__sync_and_and_fetch((p), (v))
-#define os_atomic_and_orig(p, v, m) \
-		__sync_fetch_and_and((p), (v))
-#define os_atomic_or(p, v, m) \
-		__sync_or_and_fetch((p), (v))
-#define os_atomic_or_orig(p, v, m) \
-		__sync_fetch_and_or((p), (v))
-#define os_atomic_xor(p, v, m) \
-		__sync_xor_and_fetch((p), (v))
-#define os_atomic_xor_orig(p, v, m) \
-		__sync_fetch_and_xor((p), (v))
-
-#if defined(__x86_64__) || defined(__i386__)
-// GCC emits nothing for __sync_synchronize() on x86_64 & i386
-#undef _os_atomic_full_barrier
-#define _os_atomic_full_barrier() \
-		({ __asm__ __volatile__( \
-		"mfence" \
-		: : : "memory"); })
-#undef os_atomic_load
-#define os_atomic_load(p, m) \
-		({ switch(os_atomic_memory_order_##m) { \
-		case _os_atomic_memory_order_relaxed: \
-		case _os_atomic_memory_order_acquire: \
-		case _os_atomic_memory_order_seq_cst: \
-			break; \
-		default: \
-			_os_atomic_unimplemented(); break; \
-		} *(p); })
-// xchg is faster than store + mfence
-#undef os_atomic_store
-#define os_atomic_store(p, v, m) \
-		({ switch(os_atomic_memory_order_##m) { \
-		case _os_atomic_memory_order_relaxed: \
-		case _os_atomic_memory_order_release: \
-			*(p) = (v); break; \
-		case _os_atomic_memory_order_seq_cst: \
-			(void)os_atomic_xchg((p), (v), m); break; \
-		default: \
-			_os_atomic_unimplemented(); break; \
-		} })
-#endif
-
-#else
-#error "Please upgrade to GCC 4.2 or newer."
-#endif
+#define os_atomic_force_dependency_on(p, e) (p)
+#define os_atomic_load_with_dependency_on(p, e) \
+		os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed)
+#define os_atomic_load_with_dependency_on2o(p, f, e) \
+		os_atomic_load_with_dependency_on(&(p)->f, e)
 
 #pragma mark -
 #pragma mark generic
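
A note on the new dependency macros: in this revision `os_atomic_force_dependency_on(p, e)` is literally `(p)`, so `os_atomic_load_with_dependency_on` compiles to a relaxed load and the ordering relies on the caller already having a data dependency on `e` (with `os_atomic_memory_order_dependency` mapping to acquire as the conservative fallback). A hardened variant would forge the dependency so the optimizer cannot break it; one known pattern is sketched below with a hypothetical `force_dependency_on` helper. This is not what the patch does, only what the non-trivial version of the hook could look like:

```c
#include <stdint.h>

static inline const void *
force_dependency_on(const void *p, uintptr_t e)
{
	uintptr_t opaque = e;
	// The empty asm makes `opaque` unknowable to the optimizer, so it
	// cannot fold (opaque ^ e) to zero. At run time the XOR is zero and
	// p is returned unchanged, but the computed address now carries a
	// data dependency on e.
	__asm__ __volatile__("" : "+r"(opaque));
	return (const void *)((uintptr_t)p | (opaque ^ e));
}
```

On architectures with dependency ordering (e.g. ARM), a load through the returned pointer is then ordered after the load that produced `e` without any barrier.
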
diff --git a/include/os/lock_private.h b/include/os/lock_private.h
index f1daee2..8c32ea6 100644
--- a/include/os/lock_private.h
+++ b/include/os/lock_private.h
@@ -414,7 +414,7 @@ OS_ASSUME_NONNULL_END
  * function calls to the os_unfair_lock API entrypoints add measurable overhead.
  *
  * Do not use in frameworks to implement synchronization API primitives that are
- * exposed to developers, that would lead to false primitives for that API from
+ * exposed to developers, that would lead to false positives for that API from
  * tools such as ThreadSanitizer.
  *
  * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
diff --git a/src/os/lock.c b/src/os/lock.c
index 8f3f7a9..3d66adf 100644
--- a/src/os/lock.c
+++ b/src/os/lock.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <mach/mach_time.h>
 #include
 
 #pragma mark -
@@ -88,26 +89,50 @@ OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);
 #define OS_LOCK_SPIN_SPIN_TRIES 1000
 #define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
 #endif
-#define OS_LOCK_SPIN_YIELD_TRIES 100
 
 static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;
 
+OS_ALWAYS_INLINE
+static uint64_t
+_os_lock_yield_deadline(mach_msg_timeout_t timeout)
+{
+	uint64_t abstime = timeout * NSEC_PER_MSEC;
+#if !(defined(__i386__) || defined(__x86_64__))
+	mach_timebase_info_data_t tbi;
+	kern_return_t kr = mach_timebase_info(&tbi);
+	if (kr) return UINT64_MAX;
+	abstime *= tbi.denom;
+	abstime /= tbi.numer;
+#endif
+	return mach_absolute_time() + abstime;
+}
+
+OS_ALWAYS_INLINE
+static bool
+_os_lock_yield_until(uint64_t deadline)
+{
+	return mach_absolute_time() < deadline;
+}
+
 OS_NOINLINE
 static void
 _OSSpinLockLockYield(volatile OSSpinLock *l)
 {
 	int option = SWITCH_OPTION_DEPRESS;
 	mach_msg_timeout_t timeout = 1;
-	uint32_t tries = OS_LOCK_SPIN_YIELD_TRIES;
+	uint64_t deadline = _os_lock_yield_deadline(timeout);
 	OSSpinLock lock;
 	while (unlikely(lock = *l)) {
 _yield:
 		if (unlikely(lock != _OSSpinLockLocked)) {
 			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
 		}
-		// Yield until tries first hits zero, then permanently switch to wait
-		if (unlikely(!tries--)) option = SWITCH_OPTION_WAIT;
 		thread_switch(MACH_PORT_NULL, option, timeout);
+		if (option == SWITCH_OPTION_WAIT) {
+			timeout++;
+		} else if (!_os_lock_yield_until(deadline)) {
+			option = SWITCH_OPTION_WAIT;
+		}
 	}
 	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
 	if (likely(r)) return;
-- 
2.45.2
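
The lock.c change replaces the fixed `OS_LOCK_SPIN_YIELD_TRIES` counter with a wall-clock budget: `_os_lock_yield_deadline` converts the 1ms `thread_switch` timeout into mach absolute time (scaling by `denom/numer` wherever the timebase is not 1:1, hence the i386/x86_64 exclusion), and once the deadline passes the loop stops depressing its priority and switches to `SWITCH_OPTION_WAIT` with a growing timeout. A standalone sketch of that backoff shape, assuming a macOS host; `mach_absolute_time`, `mach_timebase_info`, and `thread_switch` are the same calls the patch uses, while `spin_try_lock`, `lock_slow`, and `yield_deadline` are hypothetical stand-ins:

```c
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach/thread_switch.h>
#include <stdbool.h>
#include <stdint.h>

static const uint64_t k_nsec_per_msec = 1000000;

static uint64_t
yield_deadline(mach_msg_timeout_t timeout_ms)
{
	uint64_t abstime = timeout_ms * k_nsec_per_msec;
	// Convert nanoseconds to mach_absolute_time() ticks; the timebase is
	// 1:1 on Intel, which is why the patch skips this on i386/x86_64.
	mach_timebase_info_data_t tbi;
	if (mach_timebase_info(&tbi)) return UINT64_MAX;
	abstime *= tbi.denom;
	abstime /= tbi.numer;
	return mach_absolute_time() + abstime;
}

extern bool spin_try_lock(void); // hypothetical lock attempt

static void
lock_slow(void)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1; // 1ms quantum-depression slices
	uint64_t deadline = yield_deadline(timeout);
	while (!spin_try_lock()) {
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++; // already waiting: back off ever longer
		} else if (mach_absolute_time() >= deadline) {
			// Budget exhausted: stop depressing priority and start
			// waiting, so a preempted lock owner can actually run.
			option = SWITCH_OPTION_WAIT;
		}
	}
}
```

Unlike the old fixed count of 100 yields, the deadline is immune to the loop iterating faster or slower than expected, and the growing wait timeout keeps contended waiters from waking in lockstep.
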