+
+out:
+#if PLOCKSTAT
+ if (res == 0) {
+ PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
+ } else {
+ PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
+ }
+#endif
+ return res;
+}
+
+#pragma mark fast path
+
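+// Release the mutex on the caller's behalf and report back the updated
+// state words, dispatching to the fairshare or first-fit updatebits
+// routine according to the mutex policy.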
+OS_NOINLINE
+int
+_pthread_mutex_droplock(pthread_mutex_t *mutex, uint32_t *flagsp,
+ uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
+{
+ if (_pthread_mutex_is_fairshare(mutex)) {
+ return _pthread_mutex_fairshare_unlock_updatebits(mutex, flagsp,
+ pmtxp, mgenp, ugenp);
+ }
+ return _pthread_mutex_firstfit_unlock_updatebits(mutex, flagsp, pmtxp,
+ mgenp, ugenp);
+}
+
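+// Lock slow path taken when the fast signature check fails: validate the
+// mutex (initializing statically-initialized ones), then dispatch to the
+// lock implementation matching its policy.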
+OS_NOINLINE
+int
+_pthread_mutex_lock_init_slow(pthread_mutex_t *mutex, bool trylock)
+{
+ int res;
+
+ res = _pthread_mutex_check_init(mutex);
+ if (res != 0) return res;
+
+ if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
+ return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
+ } else if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
+ return _pthread_mutex_ulock_lock(mutex, trylock);
+ }
+ return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
+}
+
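+// Unlock-side counterpart of the init slow path above.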
+OS_NOINLINE
+static int
+_pthread_mutex_unlock_init_slow(pthread_mutex_t *mutex)
+{
+ int res;
+
+ // Initialize static mutexes for compatibility with misbehaving
+ // applications (unlock should not be the first operation on a mutex).
+ res = _pthread_mutex_check_init(mutex);
+ if (res != 0) return res;
+
+ if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
+ return _pthread_mutex_fairshare_unlock_slow(mutex);
+ } else if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
+ return _pthread_mutex_ulock_unlock(mutex);
+ }
+ return _pthread_mutex_firstfit_unlock_slow(mutex);
+}
+
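+// Unlock entry point: dispatch fairshare and ulock mutexes to their own
+// implementations, otherwise run the first-fit fast path, which clears the
+// E-bit directly when no waiters are parked in the kernel.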
+PTHREAD_NOEXPORT_VARIANT
+int
+pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+ if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
+ return _pthread_mutex_unlock_init_slow(mutex);
+ }
+
+ if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
+ return _pthread_mutex_fairshare_unlock(mutex);
+ }
+
+ if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
+ return _pthread_mutex_ulock_unlock(mutex);
+ }
+
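+ // With userspace tracing or dtrace lockstat probes enabled, divert to the
+ // slow path so the probe points there can fire.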
+#if ENABLE_USERSPACE_TRACE
+ return _pthread_mutex_firstfit_unlock_slow(mutex);
+#elif PLOCKSTAT
+ if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
+ return _pthread_mutex_firstfit_unlock_slow(mutex);
+ }
+#endif
+
+ /*
+ * This is the first-fit fast path. The fairshare fast-ish path is in
+ * _pthread_mutex_fairshare_unlock()
+ */
+ uint64_t *tidaddr;
+ MUTEX_GETTID_ADDR(mutex, &tidaddr);
+
+ mutex_seq *seqaddr;
+ MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
+
+ mutex_seq oldseq, newseq;
+ mutex_seq_load(seqaddr, &oldseq);
+
+ // We're giving up the mutex one way or the other, so go ahead and
+ // update the owner to 0 so that once the CAS below succeeds, there
+ // is no stale ownership information. If the CAS of the seqaddr
+ // fails, we may loop, but it's still valid for the owner to be
+ // SWITCHING/0
+ os_atomic_store_wide(tidaddr, 0, relaxed);
+
+ do {
+ newseq = oldseq;
+
+ if (diff_genseq(oldseq.lgenval, oldseq.ugenval) == 0) {
+ // No outstanding waiters in kernel, we can simply drop the E-bit
+ // and return.
+ newseq.lgenval &= ~PTH_RWL_EBIT;
+ } else {
+ return _pthread_mutex_firstfit_unlock_slow(mutex);
+ }
+ } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+ release)));
+
+ return 0;
+}
+
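+// First-fit fast path for lock and trylock: try to set the E-bit with a
+// single CAS, record ownership in the tid word on success, and punt to the
+// slow path on contention.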
+OS_ALWAYS_INLINE
+static inline int
+_pthread_mutex_firstfit_lock(pthread_mutex_t *mutex, bool trylock)
+{
+ /*
+ * This is the first-fit fast path. The fairshare fast-ish path is in
+ * _pthread_mutex_fairshare_lock()
+ */
+ uint64_t *tidaddr;
+ MUTEX_GETTID_ADDR(mutex, &tidaddr);
+ uint64_t selfid = _pthread_threadid_self_np_direct();
+
+ mutex_seq *seqaddr;
+ MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
+
+ mutex_seq oldseq, newseq;
+ mutex_seq_load(seqaddr, &oldseq);
+
+ if (os_unlikely(!trylock && (oldseq.lgenval & PTH_RWL_EBIT))) {
+ return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
+ }
+
+ bool gotlock;
+ do {
+ newseq = oldseq;
+ gotlock = is_rwl_ebit_clear(oldseq.lgenval);
+
+ if (trylock && !gotlock) {
+#if __LP64__
+ // The sequence load is atomic, so we can bail here without writing
+ // it and avoid some unnecessary coherence traffic - rdar://57259033
+ os_atomic_thread_fence(acquire);
+ return EBUSY;
+#else
+ // A trylock on a held lock will fail immediately. But since
+ // we did not load the sequence words atomically, perform a
+ // no-op CAS64 to ensure that nobody has unlocked concurrently.
+#endif
+ } else if (os_likely(gotlock)) {
+ // In first-fit, getting the lock simply adds the E-bit
+ newseq.lgenval |= PTH_RWL_EBIT;
+ } else {
+ return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
+ }
+ } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+ acquire)));
+
+ if (os_likely(gotlock)) {
+ os_atomic_store_wide(tidaddr, selfid, relaxed);
+ return 0;
+ } else if (trylock) {
+ return EBUSY;
+ } else {
+ __builtin_trap();
+ }
+}
+
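+// Common entry point for pthread_mutex_lock() and pthread_mutex_trylock():
+// verify the signature, hand fairshare and ulock mutexes to their own
+// implementations, and fall through to the first-fit fast path.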
+OS_ALWAYS_INLINE
+static inline int
+_pthread_mutex_lock(pthread_mutex_t *mutex, bool trylock)
+{
+ if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
+ return _pthread_mutex_lock_init_slow(mutex, trylock);
+ }
+
+ if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
+ return _pthread_mutex_fairshare_lock(mutex, trylock);
+ }
+
+ if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
+ return _pthread_mutex_ulock_lock(mutex, trylock);
+ }
+
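+ // As in pthread_mutex_unlock(), take the slow path whenever tracing or
+ // lockstat probes are enabled.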
+#if ENABLE_USERSPACE_TRACE
+ return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
+#elif PLOCKSTAT
+ if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
+ return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
+ }
+#endif
+
+ return _pthread_mutex_firstfit_lock(mutex, trylock);
+}
+
+PTHREAD_NOEXPORT_VARIANT
+int
+pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+ return _pthread_mutex_lock(mutex, false);
+}
+
+PTHREAD_NOEXPORT_VARIANT
+int
+pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ return _pthread_mutex_lock(mutex, true);
+}
+
+
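+// Shared initialization helper: fill in the mutex options either from an
+// attributes object or, when attr is NULL, from the static_type code
+// supplied for statically initialized mutexes.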
+OS_ALWAYS_INLINE
+static inline int
+_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr,
+ uint32_t static_type)
+{
+ mutex->mtxopts.value = 0;
+ mutex->mtxopts.options.mutex = 1;
+ if (attr) {
+ if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
+ return EINVAL;
+ }
+ mutex->prioceiling = (int16_t)attr->prioceiling;
+ mutex->mtxopts.options.protocol = attr->protocol;
+ mutex->mtxopts.options.policy = attr->opt;
+ mutex->mtxopts.options.type = attr->type;
+ mutex->mtxopts.options.pshared = attr->pshared;
+ } else {
+ switch (static_type) {
+ case 1:
+ mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
+ break;
+ case 2:
+ mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
+ break;
+ case 3:
+ /* firstfit fall thru */
+ case 7:
+ mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
+ mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
+ if (static_type != 3) {
+ mutex->mtxopts.options.policy = __pthread_mutex_default_opt_policy;
+ } else {
+ mutex->mtxopts.options.policy = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
+ }
+ mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
+ }
+
+ mutex->priority = 0;
+