* @APPLE_APACHE_LICENSE_HEADER_END@
*/
+#define OS_UNFAIR_LOCK_INLINE 1
#include "lock_internal.h"
+#include "os/internal.h"
+#include "resolver.h"
#include "libkern/OSAtomic.h"
#include "os/lock.h"
#include "os/lock_private.h"
#include "os/once_private.h"
-#include "resolver.h"
#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
+#include <mach/mach_time.h>
#include <os/tsd.h>
#pragma mark -
#pragma mark _os_lock_base_t
-#if !OS_VARIANT_ONLY
+OS_NOINLINE OS_NORETURN OS_COLD
+void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);
+
OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);
return l._osl_base->osl_type->osl_unlock(l);
}
-#endif //!OS_VARIANT_ONLY
-
OS_NOINLINE OS_NORETURN OS_COLD
-static void
+void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}
+
#pragma mark -
#pragma mark OSSpinLock
-#ifdef OS_LOCK_VARIANT_SELECTOR
-void _OSSpinLockLockSlow(volatile OSSpinLock *l);
-#else
-OS_NOINLINE OS_USED static void _OSSpinLockLockSlow(volatile OSSpinLock *l);
-#endif // OS_LOCK_VARIANT_SELECTOR
+OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
+static const OSSpinLock _OSSpinLockLocked = 1;
+#else
+static const OSSpinLock _OSSpinLockLocked = -1;
+#endif
+
#if OS_ATOMIC_UP
// Don't spin on UP
-#elif OS_ATOMIC_WFE
-#define OS_LOCK_SPIN_SPIN_TRIES 100
-#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
#else
#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
#endif
-#define OS_LOCK_SPIN_YIELD_TRIES 100
-static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;
+OS_ALWAYS_INLINE
+static uint64_t
+_os_lock_yield_deadline(mach_msg_timeout_t timeout)
+{
+ uint64_t abstime = timeout * NSEC_PER_MSEC;
+#if !(defined(__i386__) || defined(__x86_64__))
+ mach_timebase_info_data_t tbi;
+ kern_return_t kr = mach_timebase_info(&tbi);
+ if (kr) return UINT64_MAX;
+ abstime *= tbi.denom;
+ abstime /= tbi.numer;
+#endif
+ return mach_absolute_time() + abstime;
+}
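+// Worked example (illustrative; actual timebase values vary by hardware): with
+// a mach_timebase_info of numer=125, denom=3 (a common 24MHz ARM timebase),
+// a 1ms timeout becomes 1,000,000ns * 3 / 125 = 24,000 absolute-time units
+// added to mach_absolute_time().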
+
+OS_ALWAYS_INLINE
+static bool
+_os_lock_yield_until(uint64_t deadline)
+{
+ return mach_absolute_time() < deadline;
+}
OS_NOINLINE
static void
{
int option = SWITCH_OPTION_DEPRESS;
mach_msg_timeout_t timeout = 1;
- uint32_t tries = OS_LOCK_SPIN_YIELD_TRIES;
+ uint64_t deadline = _os_lock_yield_deadline(timeout);
OSSpinLock lock;
while (unlikely(lock = *l)) {
_yield:
if (unlikely(lock != _OSSpinLockLocked)) {
_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
}
- // Yield until tries first hits zero, then permanently switch to wait
- if (unlikely(!tries--)) option = SWITCH_OPTION_WAIT;
thread_switch(MACH_PORT_NULL, option, timeout);
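+ // Yield at depressed priority until the deadline passes, then block with
+ // SWITCH_OPTION_WAIT, lengthening the wait on each further iteration.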
+ if (option == SWITCH_OPTION_WAIT) {
+ timeout++;
+ } else if (!_os_lock_yield_until(deadline)) {
+ option = SWITCH_OPTION_WAIT;
+ }
}
bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
if (likely(r)) return;
{
return _OSSpinLockLockYield(l); // Don't spin on UP
}
-#else
+#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
if (likely(r)) return;
goto _spin;
}
-#endif
+#endif // !OS_ATOMIC_UP
-#ifdef OS_LOCK_VARIANT_SELECTOR
-#undef _OSSpinLockLockSlow
-extern void _OSSpinLockLockSlow(volatile OSSpinLock *l);
-#endif
-#if !OS_LOCK_VARIANT_ONLY
#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR
typedef struct _os_nospin_lock_s *_os_nospin_lock_t;
-void _os_nospin_lock_lock(_os_nospin_lock_t lock);
-bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
-void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
+
+OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
+OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
+OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
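+
+// On these configurations the OSSpinLock entry points below are backed by the
+// non-spinning, ulock-based _os_nospin_lock, so contended "spin" locks block
+// in the kernel instead of busy-waiting against the owner.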
void
OSSpinLockLock(volatile OSSpinLock *l)
os_atomic_store(l, 0, release);
}
+
#pragma mark -
#pragma mark os_lock_spin_t
OS_LOCK_STRUCT_DECL_INTERNAL(spin,
OSSpinLock volatile osl_spinlock;
);
-#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);
-#endif // !OS_VARIANT_ONLY
-
-#ifdef OS_VARIANT_SELECTOR
-#define _os_lock_spin_lock \
- OS_VARIANT(_os_lock_spin_lock, OS_VARIANT_SELECTOR)
-#define _os_lock_spin_trylock \
- OS_VARIANT(_os_lock_spin_trylock, OS_VARIANT_SELECTOR)
-#define _os_lock_spin_unlock \
- OS_VARIANT(_os_lock_spin_unlock, OS_VARIANT_SELECTOR)
-OS_LOCK_METHODS_DECL(spin);
-#endif // OS_VARIANT_SELECTOR
void
_os_lock_spin_lock(_os_lock_spin_t l)
return OSSpinLockUnlock(&l->osl_spinlock);
}
+
#pragma mark -
#pragma mark os_lock_owner_t
#endif
typedef mach_port_name_t os_lock_owner_t;
+#define OS_LOCK_NO_OWNER MACH_PORT_NULL
-OS_ALWAYS_INLINE
+
+OS_ALWAYS_INLINE OS_CONST
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
return self;
}
-#define OS_LOCK_NO_OWNER MACH_PORT_NULL
-
-#if !OS_LOCK_VARIANT_ONLY
OS_NOINLINE OS_NORETURN OS_COLD
static void
"os_lock");
}
-#endif //!OS_LOCK_VARIANT_ONLY
#pragma mark -
#pragma mark os_lock_handoff_t
OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
os_lock_owner_t volatile osl_owner;
);
-#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);
-#endif // !OS_VARIANT_ONLY
-
-#ifdef OS_VARIANT_SELECTOR
-#define _os_lock_handoff_lock \
- OS_VARIANT(_os_lock_handoff_lock, OS_VARIANT_SELECTOR)
-#define _os_lock_handoff_trylock \
- OS_VARIANT(_os_lock_handoff_trylock, OS_VARIANT_SELECTOR)
-#define _os_lock_handoff_unlock \
- OS_VARIANT(_os_lock_handoff_unlock, OS_VARIANT_SELECTOR)
-OS_LOCK_METHODS_DECL(handoff);
-#endif // OS_VARIANT_SELECTOR
#define OS_LOCK_HANDOFF_YIELD_TRIES 100
os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}
+
#pragma mark -
#pragma mark os_ulock_value_t
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)
#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
-#define OS_ULOCK_IS_OWNER(value, self) ({ \
- os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
- (_owner == (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
-#define OS_ULOCK_IS_NOT_OWNER(value, self) ({ \
- os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
- (_owner != (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
-
+#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
+ os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
+ (!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
+#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
+ os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
+ (!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#pragma mark -
#pragma mark os_unfair_lock
os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
os_unfair_lock_t lock);
+OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
+ os_unfair_lock_t lock, os_unfair_lock_options_t options);
+OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);
+
+OS_NOINLINE OS_NORETURN OS_COLD
+void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
+OS_NOINLINE OS_NORETURN OS_COLD
+void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
+OS_NOINLINE OS_NORETURN OS_COLD
+void _os_unfair_lock_corruption_abort(os_ulock_value_t current);
_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
ULF_WAIT_WORKQ_DATA_CONTENTION,
"check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
+#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u
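+// Internal option bit, deliberately outside OS_UNFAIR_LOCK_OPTIONS_MASK: the
+// slow paths strip it and use it to relax the recursion/ownership aborts for
+// entry points (such as the no-TSD 4libpthread variants) that record
+// OS_ULOCK_ANONYMOUS_OWNER rather than a real thread port.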
+
OS_NOINLINE OS_NORETURN OS_COLD
-static void
+void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
}
OS_NOINLINE OS_NORETURN OS_COLD
-static void
+void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
}
OS_NOINLINE OS_NORETURN OS_COLD
-static void
+void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}
+
OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
os_unfair_lock_options_t options)
{
- os_ulock_value_t current, new, waiters_mask = 0;
+ os_unfair_lock_options_t allow_anonymous_owner =
+ options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
+ options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
}
+ os_ulock_value_t current, new, waiters_mask = 0;
while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
OS_LOCK_NO_OWNER)) {
_retry:
- if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
+ if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
return _os_unfair_lock_recursive_abort(self);
}
new = current & ~OS_ULOCK_NOWAITERS_BIT;
__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
}
}
- // If there are more waiters, unset nowaiters bit when acquiring lock
- waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
+ if (ret > 0) {
+ // If there are more waiters, unset nowaiters bit when acquiring lock
+ waiters_mask = OS_ULOCK_NOWAITERS_BIT;
+ }
}
new = self & ~waiters_mask;
bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
- os_lock_owner_t self)
+ os_lock_owner_t self, os_unfair_lock_options_t options)
{
- if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
+ os_unfair_lock_options_t allow_anonymous_owner =
+ options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
+ options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
+ if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
}
if (current & OS_ULOCK_NOWAITERS_BIT) {
os_ulock_value_t current;
current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
if (likely(current == self)) return;
- return _os_unfair_lock_unlock_slow(l, current, self);
+ return _os_unfair_lock_unlock_slow(l, current, self, 0);
}
void
bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
if (likely(r)) return;
return _os_unfair_lock_lock_slow(l, self,
- OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
+ OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION|
+ OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}
void
os_ulock_value_t current;
current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
if (likely(current == self)) return;
- return _os_unfair_lock_unlock_slow(l, current, self);
+ return _os_unfair_lock_unlock_slow(l, current, self,
+ OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}
-#pragma mark -
-#pragma mark _os_lock_unfair_t 4Libc // <rdar://problem/27138264>
-
-OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
- os_unfair_lock_t lock, os_unfair_lock_options_t options);
-OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);
-OS_NOINLINE
-static void
-_os_unfair_lock_lock_slow_4Libc(_os_unfair_lock_t l, os_lock_owner_t self,
- os_unfair_lock_options_t options)
+void
+os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
- os_ulock_value_t current, new, waiters_mask = 0;
- if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
- __LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
- }
- while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
- OS_LOCK_NO_OWNER)) {
-_retry:
- if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
- return _os_unfair_lock_recursive_abort(self);
- }
- new = current & ~OS_ULOCK_NOWAITERS_BIT;
- if (current != new) {
- // Clear nowaiters bit in lock value before waiting
- if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
- relaxed)){
- continue;
- }
- current = new;
- }
- int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
- l, current, 0);
- if (unlikely(ret < 0)) {
- switch (-ret) {
- case EINTR:
- case EFAULT:
- continue;
- case EOWNERDEAD:
- // if we get an `EOWNERDEAD` it could be corruption of the lock
- // so for the Libc locks, if we can steal the lock, assume
- // it is corruption and pretend we got the lock with contention
- new = self & ~OS_ULOCK_NOWAITERS_BIT;
- if (os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
- acquire)) {
- return;
- }
- break;
- default:
- __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
- }
- }
- // If there are more waiters, unset nowaiters bit when acquiring lock
- waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
+ _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+ os_lock_owner_t self = _os_lock_owner_get_self();
+ os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
+ if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
+ __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
+ "Lock unexpectedly not owned by current thread");
}
- new = self & ~waiters_mask;
- bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
- &current, acquire);
- if (unlikely(!r)) goto _retry;
}
-OS_NOINLINE
-static void
-_os_unfair_lock_unlock_slow_4Libc(_os_unfair_lock_t l)
+void
+os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
- for (;;) {
- int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
- if (unlikely(ret < 0)) {
- switch (-ret) {
- case EINTR:
- continue;
- case ENOENT:
- break;
- default:
- __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
- }
- }
- break;
+ _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+ os_lock_owner_t self = _os_lock_owner_get_self();
+ os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
+ if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
+ __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
+ "Lock unexpectedly owned by current thread");
}
}
-void
-os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
- os_unfair_lock_options_t options)
+
+#pragma mark -
+#pragma mark os_unfair_recursive_lock
+
+OS_ATOMIC_EXPORT
+void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
+ os_unfair_lock_options_t options);
+
+OS_ATOMIC_EXPORT
+bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
+
+OS_ATOMIC_EXPORT
+void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
+
+OS_ATOMIC_EXPORT
+bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
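+
+// Illustrative usage (assuming the os_unfair_recursive_lock type,
+// OS_UNFAIR_RECURSIVE_LOCK_INIT and OS_UNFAIR_LOCK_NONE from the os/lock
+// headers):
+//
+//	static os_unfair_recursive_lock ourl = OS_UNFAIR_RECURSIVE_LOCK_INIT;
+//
+//	os_unfair_recursive_lock_lock_with_options(&ourl, OS_UNFAIR_LOCK_NONE);
+//	if (os_unfair_recursive_lock_trylock(&ourl)) {  // owner re-entry succeeds
+//		os_unfair_recursive_lock_unlock(&ourl);     // drops the nested level
+//	}
+//	os_unfair_recursive_lock_unlock(&ourl);         // releases the lock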
+
+
+static inline os_lock_owner_t
+_os_unfair_lock_owner(os_unfair_lock_t lock)
{
_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
- os_lock_owner_t self = _os_lock_owner_get_self();
- bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
- if (likely(r)) return;
- return _os_unfair_lock_lock_slow_4Libc(l, self, options);
+ return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
}
void
-os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock)
+os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
+ os_unfair_lock_options_t options)
{
+ os_lock_owner_t cur, self = _os_lock_owner_get_self();
+ _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;
+
+ if (likely(os_atomic_cmpxchgv2o(l, oul_value,
+ OS_LOCK_NO_OWNER, self, &cur, acquire))) {
+ return;
+ }
+
+ if (OS_ULOCK_OWNER(cur) == self) {
+ lock->ourl_count++;
+ return;
+ }
+
+ return _os_unfair_lock_lock_slow(l, self, options);
+}
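+// Nested acquisitions by the owning thread only increment ourl_count; the
+// underlying unfair lock is taken once, so waiters are woken only by the
+// outermost unlock.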
+
+bool
+os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
+{
+ os_lock_owner_t cur, self = _os_lock_owner_get_self();
+ _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;
+
+ if (likely(os_atomic_cmpxchgv2o(l, oul_value,
+ OS_LOCK_NO_OWNER, self, &cur, acquire))) {
+ return true;
+ }
+
+ if (likely(OS_ULOCK_OWNER(cur) == self)) {
+ lock->ourl_count++;
+ return true;
+ }
+
+ return false;
+}
+
+
+OS_ALWAYS_INLINE
+static inline void
+_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
+ os_lock_owner_t self)
+{
+ if (unlikely(lock->ourl_count)) {
+ os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
+ if (unlikely(cur != self)) {
+ _os_unfair_lock_unowned_abort(cur);
+ }
+ lock->ourl_count--;
+ return;
+ }
+
_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
- os_lock_owner_t self = _os_lock_owner_get_self();
os_ulock_value_t current;
current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
if (likely(current == self)) return;
- return _os_unfair_lock_unlock_slow_4Libc(l);
+ return _os_unfair_lock_unlock_slow(l, current, self, 0);
}
-#if !OS_VARIANT_ONLY
void
-os_unfair_lock_assert_owner(os_unfair_lock_t lock)
+os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
{
- _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
os_lock_owner_t self = _os_lock_owner_get_self();
- os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
- if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
- __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
- "Lock unexpectedly not owned by current thread");
- }
+ _os_unfair_recursive_lock_unlock(lock, self);
}
-void
-os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
+bool
+os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
{
- _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+ os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
os_lock_owner_t self = _os_lock_owner_get_self();
- os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
- if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
- __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
- "Lock unexpectedly owned by current thread");
+ if (likely(cur == self)) {
+ _os_unfair_recursive_lock_unlock(lock, self);
+ return true;
}
+ return false;
}
-#endif
+
#pragma mark -
#pragma mark _os_lock_unfair_t
OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
os_unfair_lock osl_unfair_lock;
);
-#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);
-#endif // !OS_VARIANT_ONLY
-
-#ifdef OS_VARIANT_SELECTOR
-#define _os_lock_unfair_lock \
- OS_VARIANT(_os_lock_unfair_lock, OS_VARIANT_SELECTOR)
-#define _os_lock_unfair_trylock \
- OS_VARIANT(_os_lock_unfair_trylock, OS_VARIANT_SELECTOR)
-#define _os_lock_unfair_unlock \
- OS_VARIANT(_os_lock_unfair_unlock, OS_VARIANT_SELECTOR)
-OS_LOCK_METHODS_DECL(unfair);
-#endif // OS_VARIANT_SELECTOR
void
_os_lock_unfair_lock(_os_lock_unfair_t l)
return os_unfair_lock_unlock(&l->osl_unfair_lock);
}
+
#pragma mark -
#pragma mark _os_nospin_lock
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
+
OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
}
}
- // If there are more waiters, unset nowaiters bit when acquiring lock
- waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
+ if (ret > 0) {
+ // If there are more waiters, unset nowaiters bit when acquiring lock
+ waiters_mask = OS_ULOCK_NOWAITERS_BIT;
+ }
}
new = self & ~waiters_mask;
bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
return _os_nospin_lock_unlock_slow(l, current);
}
+
#pragma mark -
#pragma mark _os_lock_nospin_t
OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
_os_nospin_lock osl_nospin_lock;
);
-#if !OS_VARIANT_ONLY
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);
-#endif // !OS_VARIANT_ONLY
-
-#ifdef OS_VARIANT_SELECTOR
-#define _os_lock_nospin_lock \
- OS_VARIANT(_os_lock_nospin_lock, OS_VARIANT_SELECTOR)
-#define _os_lock_nospin_trylock \
- OS_VARIANT(_os_lock_nospin_trylock, OS_VARIANT_SELECTOR)
-#define _os_lock_nospin_unlock \
- OS_VARIANT(_os_lock_nospin_unlock, OS_VARIANT_SELECTOR)
-OS_LOCK_METHODS_DECL(nospin);
-#endif // OS_VARIANT_SELECTOR
void
_os_lock_nospin_lock(_os_lock_nospin_t l)
return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}
+
#pragma mark -
#pragma mark os_once_t
typedef struct os_once_gate_s {
union {
os_ulock_value_t ogo_lock;
- os_once_t ogo_once;
+ uintptr_t ogo_once;
};
} os_once_gate_s, *os_once_gate_t;
-#define OS_ONCE_INIT ((os_once_t)0l)
-#define OS_ONCE_DONE (~(os_once_t)0l)
+#define OS_ONCE_INIT ((uintptr_t)0l)
+#define OS_ONCE_DONE (~(uintptr_t)0l)
+
+#if defined(__i386__) || defined(__x86_64__)
+#define OS_ONCE_USE_QUIESCENT_COUNTER 0
+#else
+#define OS_ONCE_USE_QUIESCENT_COUNTER 1
+#endif
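+// On Intel targets the initializer publishes OS_ONCE_DONE directly (see
+// _os_once_mark_done below); elsewhere it first parks the gate in a
+// quiescent-counter "generation" state (_os_once_mark_quiescing /
+// _os_once_mark_done_if_quiesced below).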
OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);
OS_NOINLINE OS_NORETURN OS_COLD
-static void
+void _os_once_gate_recursive_abort(os_lock_owner_t owner);
+OS_NOINLINE OS_NORETURN OS_COLD
+void _os_once_gate_unowned_abort(os_lock_owner_t owner);
+OS_NOINLINE OS_NORETURN OS_COLD
+void _os_once_gate_corruption_abort(os_ulock_value_t current);
+
+
+OS_NOINLINE OS_NORETURN OS_COLD
+void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
}
OS_NOINLINE OS_NORETURN OS_COLD
-static void
+void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
}
OS_NOINLINE OS_NORETURN OS_COLD
-static void
+void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}
-OS_NOINLINE
-static void
-_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
+
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+#define OS_ONCE_MAKE_GEN(gen) (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
+#define OS_ONCE_IS_GEN(gen) (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)
+
+// The _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
+// all CPUs have performed a context switch.
+//
+// To make sure all CPUs context switched at least once since `gen`,
+// we need to observe 4 increments, see libdispatch/src/shims/lock.h
+#define OS_ONCE_GEN_SAFE_DELTA (4 << 2)
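+//
+// Illustrative lifetime of the once value under this scheme:
+//   OS_ONCE_INIT (0)
+//   -> the initializing thread's port (NOWAITERS bit cleared once contended)
+//   -> OS_ONCE_MAKE_GEN(counter) when func(ctxt) returns
+//   -> OS_ONCE_DONE once a later caller observes the counter has advanced by
+//      OS_ONCE_GEN_SAFE_DELTA, i.e. every CPU has context switched since.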
+
+OS_ALWAYS_INLINE
+static inline uintptr_t
+_os_once_generation(void)
{
- os_ulock_value_t tid_old, tid_new;
+ uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
+ return OS_ONCE_MAKE_GEN(value);
+}
- for (;;) {
- os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
- switch (tid_old) {
- case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
- case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
- os_atomic_rmw_loop_give_up(return);
- }
- tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
- if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
- });
- if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self))) {
- return _os_once_gate_recursive_abort(self);
- }
- int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
- gate, tid_new, 0);
- if (unlikely(ret < 0)) {
- switch (-ret) {
- case EINTR:
- case EFAULT:
- continue;
- case EOWNERDEAD:
- _os_once_gate_corruption_abort(tid_old);
- break;
- default:
- __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
- }
- }
+OS_ALWAYS_INLINE
+static inline uintptr_t
+_os_once_mark_quiescing(os_once_gate_t og)
+{
+ return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
+}
+
+OS_ALWAYS_INLINE
+static void
+_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
+{
+ if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
+ os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
}
}
+#else
+OS_ALWAYS_INLINE
+static inline uintptr_t
+_os_once_mark_done(os_once_gate_t og)
+{
+ return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
+}
+#endif
OS_NOINLINE
static void
-_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
+_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
os_lock_owner_t self)
{
- if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
+ if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
}
if (current & OS_ULOCK_NOWAITERS_BIT) {
}
for (;;) {
int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
- gate, 0);
+ &og->ogo_lock, 0);
if (unlikely(ret < 0)) {
switch (-ret) {
case EINTR:
}
}
-OS_ALWAYS_INLINE
+OS_NOINLINE
static void
-_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
- os_once_t value)
-{
- // The next barrier must be long and strong.
- //
- // The scenario: SMP systems with weakly ordered memory models
- // and aggressive out-of-order instruction execution.
- //
- // The problem:
- //
- // The os_once*() wrapper macro causes the callee's
- // instruction stream to look like this (pseudo-RISC):
- //
- // load r5, pred-addr
- // cmpi r5, -1
- // beq 1f
- // call os_once*()
- // 1f:
- // load r6, data-addr
- //
- // May be re-ordered like so:
- //
- // load r6, data-addr
- // load r5, pred-addr
- // cmpi r5, -1
- // beq 1f
- // call os_once*()
- // 1f:
- //
- // Normally, a barrier on the read side is used to workaround
- // the weakly ordered memory model. But barriers are expensive
- // and we only need to synchronize once! After func(ctxt)
- // completes, the predicate will be marked as "done" and the
- // branch predictor will correctly skip the call to
- // os_once*().
- //
- // A far faster alternative solution: Defeat the speculative
- // read-ahead of peer CPUs.
- //
- // Modern architectures will throw away speculative results
- // once a branch mis-prediction occurs. Therefore, if we can
- // ensure that the predicate is not marked as being complete
- // until long after the last store by func(ctxt), then we have
- // defeated the read-ahead of peer CPUs.
- //
- // In other words, the last "store" by func(ctxt) must complete
- // and then N cycles must elapse before ~0l is stored to *val.
- // The value of N is whatever is sufficient to defeat the
- // read-ahead mechanism of peer CPUs.
- //
- // On some CPUs, the most fully synchronizing instruction might
- // need to be issued.
- os_atomic_maximally_synchronizing_barrier();
-
- // above assumed to contain release barrier
- os_ulock_value_t current =
- (os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, relaxed);
- if (likely(current == self)) return;
- _os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
+_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
+ os_lock_owner_t self)
+{
+ uintptr_t v;
+
+ func(ctxt);
+
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+ v = _os_once_mark_quiescing(og);
+#else
+ v = _os_once_mark_done(og);
+#endif
+ if (likely((os_ulock_value_t)v == self)) return;
+ _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
+}
+
+OS_NOINLINE
+static void
+_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
+ os_lock_owner_t self)
+{
+ uintptr_t old, new;
+
+ for (;;) {
+ os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
+ if (old == OS_ONCE_DONE) {
+ os_atomic_rmw_loop_give_up(return);
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+ } else if (OS_ONCE_IS_GEN(old)) {
+ os_atomic_rmw_loop_give_up({
+ os_atomic_thread_fence(acquire);
+ return _os_once_mark_done_if_quiesced(og, old);
+ });
+#endif
+ } else if (old == OS_ONCE_INIT) {
+ // __os_once_reset was used, try to become the new initializer
+ new = (uintptr_t)self;
+ } else {
+ new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
+ if (new == old) os_atomic_rmw_loop_give_up(break);
+ }
+ });
+ if (old == OS_ONCE_INIT) {
+ // see comment in _os_once, pairs with the release barrier
+ // in __os_once_reset()
+ os_atomic_thread_fence(acquire);
+ return _os_once_callout(og, ctxt, func, self);
+ }
+ if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
+ return _os_once_gate_recursive_abort(self);
+ }
+ int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
+ &og->ogo_lock, (os_ulock_value_t)new, 0);
+ if (unlikely(ret < 0)) {
+ switch (-ret) {
+ case EINTR:
+ case EFAULT:
+ continue;
+ case EOWNERDEAD:
+ _os_once_gate_corruption_abort((os_lock_owner_t)old);
+ break;
+ default:
+ __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
+ }
+ }
+ }
}
// Atomically resets the once value to zero and then signals all
-// pending waiters to return from their _os_once_gate_wait_slow()
+// pending waiters to return from their __ulock_wait()
void
__os_once_reset(os_once_t *val)
{
os_once_gate_t og = (os_once_gate_t)val;
os_lock_owner_t self = _os_lock_owner_get_self();
- _os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
+ uintptr_t v;
+
+ v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
+ if (likely((os_ulock_value_t)v == self)) return;
+ return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}
void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
os_once_gate_t og = (os_once_gate_t)val;
- os_lock_owner_t self = _os_lock_owner_get_self();
- os_once_t v = (os_once_t)self;
+ os_lock_owner_t self;
+ uintptr_t v;
+
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+ v = os_atomic_load(&og->ogo_once, acquire);
+ if (likely(OS_ONCE_IS_GEN(v))) {
+ return _os_once_mark_done_if_quiesced(og, v);
+ }
+#endif
+
+ self = _os_lock_owner_get_self();
+ v = (uintptr_t)self;
- if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
- func(ctxt);
- _os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
- } else {
- _os_once_gate_wait_slow(&og->ogo_lock, self);
+ // The acquire barrier pairs with the release in __os_once_reset()
+ // for cases when a previous initializer failed.
+ if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
+ return _os_once_callout(og, ctxt, func, self);
}
+ return _os_once_gate_wait(og, ctxt, func, self);
}
-#if !OS_VARIANT_ONLY
#pragma mark -
#pragma mark os_lock_eliding_t
OS_LOCK_TYPE_INSTANCE(transactional);
#endif // !TARGET_OS_IPHONE
-#endif // !OS_VARIANT_ONLY
-#endif // !OS_LOCK_VARIANT_ONLY