#pragma mark ulock

+OS_ALWAYS_INLINE
+static inline uint32_t
+_pthread_mutex_ulock_self_owner_value(void)
+{
+ mach_port_t self_port = _pthread_mach_thread_self_direct();
+ return self_port & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
+}
+
+OS_NOINLINE
+static int
+_pthread_mutex_ulock_lock_slow(pthread_mutex_t *mutex, uint32_t self_ownerval,
+ uint32_t state)
+{
+ bool success = false, kernel_waiters = false;
+
+ uint32_t wait_op = UL_UNFAIR_LOCK | ULF_NO_ERRNO;
+ if (__pthread_mutex_ulock_adaptive_spin) {
+ wait_op |= ULF_WAIT_ADAPTIVE_SPIN;
+ }
+
+ PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
+ do {
+ bool owner_dead = false;
+
+ do {
+ uint32_t current_ownerval = state & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
+ if (os_unlikely(owner_dead)) {
+ // TODO: PTHREAD_STRICT candidate
+ //
+ // For a non-recursive mutex, this indicates that it's really
+ // being used as a semaphore: even though we're the current
+ // owner, in reality we're expecting another thread to 'unlock'
+ // this mutex on our behalf later.
+ //
+ // __ulock_wait(2) doesn't permit you to wait for yourself, so
+ // we need to first swap our ownership for the anonymous owner
+ current_ownerval =
+ MACH_PORT_DEAD & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
+ owner_dead = false;
+ }
+ uint32_t new_state =
+ current_ownerval | _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
+ success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, new_state,
+ &state, relaxed);
+ if (!success) {
+ continue;
+ }
+
+ int rc = __ulock_wait(wait_op, &mutex->ulock, new_state, 0);
+
+ PTHREAD_TRACE(ulmutex_lock_wait, mutex, new_state, rc, 0);
+
+ if (os_unlikely(rc < 0)) {
+ switch (-rc) {
+ case EINTR:
+ case EFAULT:
+ break;
+ case EOWNERDEAD:
+ owner_dead = true;
+ continue;
+ default:
+ PTHREAD_INTERNAL_CRASH(rc, "ulock_wait failure");
+ }
+ } else if (rc > 0) {
+ kernel_waiters = true;
+ }
+
+ state = os_atomic_load(&mutex->ulock.uval, relaxed);
+ } while (state != _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE);
+
+ uint32_t locked_state = self_ownerval;
+ if (kernel_waiters) {
+ locked_state |= _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
+ }
+
+ success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, locked_state,
+ &state, acquire);
+ } while (!success);
+ PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);
+
+ return 0;
+}
+
+PTHREAD_NOEXPORT_VARIANT
+int
+_pthread_mutex_ulock_lock(pthread_mutex_t *mutex, bool trylock)
+{
+ uint32_t unlocked = _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE;
+ uint32_t locked = _pthread_mutex_ulock_self_owner_value();
+ uint32_t state;
+
+ bool success = os_atomic_cmpxchgv(&mutex->ulock.uval, unlocked, locked,
+ &state, acquire);
+
+ if (trylock) {
+ PTHREAD_TRACE(ulmutex_trylock, mutex, locked, state, success);
+ } else {
+ PTHREAD_TRACE(ulmutex_lock, mutex, locked, state, success);
+ }
+
+ int rc = 0;
+ if (!success) {
+ if (trylock) {
+ rc = EBUSY;
+ } else {
+ rc = _pthread_mutex_ulock_lock_slow(mutex, locked, state);
+ }
+ }
+
+ if (rc) {
+ PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, rc);
+ } else {
+ PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, /* recursive */ 0, 0);
+ }
+
+ return rc;
+}
+
/*
 * Slow path for unlocking a ulock-based mutex: the unlock xchg observed a
 * state other than "owned by self, no waiters".
 *
 * self_ownerval: the caller's owner bits (truncated mach thread port name).
 * orig_state:    the word the unlock xchg replaced with the unlocked value.
 *
 * Returns 0; historically-illegal usage is tolerated for psynch
 * compatibility rather than reported.
 */
OS_NOINLINE
static int
_pthread_mutex_ulock_unlock_slow(pthread_mutex_t *mutex, uint32_t self_ownerval,
		uint32_t orig_state)
{
	// Unlock of an already-unlocked mutex: silently succeed.
	if (os_unlikely(orig_state == _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE)) {
		// XXX This is illegal, but psynch permitted it...
		// TODO: PTHREAD_STRICT candidate
		return 0;
	}

	uint32_t wake_flags = 0;

	uint32_t orig_ownerval = orig_state & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
	bool orig_waiters = orig_state & _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
	if (os_unlikely(orig_ownerval != self_ownerval)) {
		// Unlock by a thread that is not the recorded owner.
		// XXX This is illegal, but psynch permitted it...
		// TODO: PTHREAD_STRICT candidate
		if (!orig_waiters) {
			// No waiters blocked in the kernel; nothing to wake.
			return 0;
		}

		// Tell the kernel to wake a waiter even though we are not the
		// owner it has on record.
		wake_flags |= ULF_WAKE_ALLOW_NON_OWNER;
	} else if (os_unlikely(!orig_waiters)) {
		// We were the owner, so the only way to reach the slow path is
		// with the waiters bit set; anything else is state corruption.
		PTHREAD_INTERNAL_CRASH(0, "unlock_slow without orig_waiters");
	}

	// Wake one waiter, retrying if the syscall is interrupted.
	for (;;) {
		int rc = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | wake_flags,
				&mutex->ulock, 0);

		PTHREAD_TRACE(ulmutex_unlock_wake, mutex, rc, 0, 0);

		if (os_unlikely(rc < 0)) {
			switch (-rc) {
			case EINTR:
				// Interrupted: issue the wake again.
				continue;
			case ENOENT:
				// No waiter found in the kernel; nothing left to do.
				break;
			default:
				PTHREAD_INTERNAL_CRASH(-rc, "ulock_wake failure");
			}
		}
		break;
	}

	return 0;
}
+
+PTHREAD_NOEXPORT_VARIANT
+int
+_pthread_mutex_ulock_unlock(pthread_mutex_t *mutex)
+{
+ uint32_t locked_uncontended = _pthread_mutex_ulock_self_owner_value();
+ uint32_t unlocked = _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE;
+ uint32_t state = os_atomic_xchg(&mutex->ulock.uval, unlocked, release);
+
+ PTHREAD_TRACE(ulmutex_unlock, mutex, locked_uncontended, state, 0);
+
+ int rc = 0;
+ if (state != locked_uncontended) {
+ rc = _pthread_mutex_ulock_unlock_slow(mutex, locked_uncontended,
+ state);
+ }
+
+ if (rc) {
+ PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, rc);
+ } else {
+ PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, /* recursive */ 0);
+ }
+
+ return rc;
+}
+