+
+ return 0;
+}
+
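+// Fast-path unlock for a fairshare mutex: clear the ownership tid and bump
+// the unlock generation with a single CAS. Falls back to the slow path when
+// tracing/probes are enabled or when a kernel prepost has to be cleared.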
+OS_NOINLINE
+static int
+_pthread_mutex_fairshare_unlock(pthread_mutex_t *mutex)
+{
+#if ENABLE_USERSPACE_TRACE
+ return _pthread_mutex_fairshare_unlock_slow(mutex);
+#elif PLOCKSTAT
+ if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
+ return _pthread_mutex_fairshare_unlock_slow(mutex);
+ }
+#endif
+
+ uint64_t *tidaddr;
+ MUTEX_GETTID_ADDR(mutex, &tidaddr);
+
+ mutex_seq *seqaddr;
+ MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
+
+ mutex_seq oldseq, newseq;
+ mutex_seq_load(seqaddr, &oldseq);
+
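+ // If the lock and unlock generation counts already match, the mutex is
+ // not actually locked.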
+ int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
+ if (os_unlikely(numwaiters == 0)) {
+ // spurious unlock (unlock of an already-unlocked lock); nothing to do
+ return 0;
+ }
+
+ // We're giving up the mutex one way or the other, so go ahead and
+ // update the owner to 0 so that once the CAS below succeeds, there
+ // is no stale ownership information. If the CAS of the seqaddr
+ // fails, we may loop, but it's still valid for the owner to be
+ // SWITCHING/0
+ os_atomic_store_wide(tidaddr, 0, relaxed);
+
+ do {
+ newseq = oldseq;
+ newseq.ugenval += PTHRW_INC;
+
+ if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
+ (newseq.ugenval & PTHRW_COUNT_MASK))) {
+ // If this CAS succeeds, we are guaranteed a fast-path (CAS-only)
+ // unlock, because:
+ // a. our lock and unlock sequences are now equal
+ // b. we don't need to clear an unlock prepost from the kernel
+
+ // do not reset Ibit, just K&E
+ newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
+ } else {
+ return _pthread_mutex_fairshare_unlock_slow(mutex);
+ }
+ } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+ release)));
+
+ return 0;
+}
+
+#pragma mark ulock
+
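+// Returns the calling thread's mach port name truncated to the bits that
+// fit in the ulock owner field.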
+OS_ALWAYS_INLINE
+static inline uint32_t
+_pthread_mutex_ulock_self_owner_value(void)
+{
+ mach_port_t self_port = _pthread_mach_thread_self_direct();
+ return self_port & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
+}
+
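+// Contended acquisition path for a ulock-based mutex: publish the waiters
+// bit, park in the kernel via __ulock_wait(), and retry until we can CAS
+// ourselves in as the owner.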
+OS_NOINLINE
+static int
+_pthread_mutex_ulock_lock_slow(pthread_mutex_t *mutex, uint32_t self_ownerval,
+ uint32_t state)
+{
+ bool success = false, kernel_waiters = false;
+
+ uint32_t wait_op = UL_UNFAIR_LOCK | ULF_NO_ERRNO;
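+ // Optionally let the kernel spin adaptively before blocking the thread.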
+ if (__pthread_mutex_ulock_adaptive_spin) {
+ wait_op |= ULF_WAIT_ADAPTIVE_SPIN;
+ }
+
+ PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
+ do {
+ bool owner_dead = false;
+
+ do {
+ uint32_t current_ownerval = state & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
+ if (os_unlikely(owner_dead)) {
+ // TODO: PTHREAD_STRICT candidate
+ //
+ // For a non-recursive mutex, this indicates that it's really
+ // being used as a semaphore: even though we're the current
+ // owner, in reality we're expecting another thread to 'unlock'
+ // this mutex on our behalf later.
+ //
+ // __ulock_wait(2) doesn't permit you to wait for yourself, so
+ // we need to first swap our ownership for the anonymous owner
+ current_ownerval =
+ MACH_PORT_DEAD & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
+ owner_dead = false;
+ }
+ uint32_t new_state =
+ current_ownerval | _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
+ success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, new_state,
+ &state, relaxed);
+ if (!success) {
+ continue;
+ }
+
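+ // Block in the kernel; __ulock_wait() only sleeps if the lock word still
+ // holds new_state, so a concurrent unlock is not missed.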
+ int rc = __ulock_wait(wait_op, &mutex->ulock, new_state, 0);
+
+ PTHREAD_TRACE(ulmutex_lock_wait, mutex, new_state, rc, 0);
+
+ if (os_unlikely(rc < 0)) {
+ switch (-rc) {
+ case EINTR:
+ case EFAULT:
+ break;
+ case EOWNERDEAD:
+ owner_dead = true;
+ continue;
+ default:
+ PTHREAD_INTERNAL_CRASH(rc, "ulock_wait failure");
+ }
+ } else if (rc > 0) {
+ kernel_waiters = true;
+ }
+
+ state = os_atomic_load(&mutex->ulock.uval, relaxed);
+ } while (state != _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE);
+
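+ // The lock read as unlocked: try to claim it. Keep the waiters bit set
+ // if the kernel reported other waiters so that our eventual unlock will
+ // issue a wakeup.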
+ uint32_t locked_state = self_ownerval;
+ if (kernel_waiters) {
+ locked_state |= _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
+ }
+
+ success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, locked_state,
+ &state, acquire);
+ } while (!success);
+ PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);
+
+ return 0;