+#endif // OS_UP_VARIANT_ONLY
+
+PTHREAD_ALWAYS_INLINE
+static inline int
+_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
+{
+#if PLOCKSTAT || DEBUG_TRACE_POINTS
+ if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
+ DEBUG_TRACE_POINTS) {
+ return _pthread_mutex_lock_slow(omutex, trylock);
+ }
+#endif
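+ // Only mutexes carrying the fast-path signature are handled here;
+ // everything else is diverted to the slow path.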
+ _pthread_mutex *mutex = (_pthread_mutex *)omutex;
+ if (!_pthread_mutex_check_signature_fast(mutex)) {
+ return _pthread_mutex_lock_slow(omutex, trylock);
+ }
+
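+ // tidaddr is the word recording the owner's thread ID; selfid is this
+ // thread's ID, stored there if the fast-path acquire below succeeds.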
+ uint64_t oldtid;
+ volatile uint64_t *tidaddr;
+ MUTEX_GETTID_ADDR(mutex, &tidaddr);
+ uint64_t selfid = _pthread_selfid_direct();
+
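+ // seqaddr packs the lock (L) sequence word in the low 32 bits and the
+ // unlock (U) sequence word in the high 32 bits; updates to the pair are
+ // published with a single 64-bit CAS.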
+ uint64_t oldval64, newval64;
+ volatile uint64_t *seqaddr;
+ MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
+
+ uint32_t lgenval, ugenval;
+ bool gotlock = false;
+
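+ // Snapshot the sequence words, compute the desired new state, and try to
+ // publish it in one CAS; if another thread raced us, start over.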
+ do {
+ oldval64 = *seqaddr;
+ oldtid = *tidaddr;
+ lgenval = (uint32_t)oldval64;
+ ugenval = (uint32_t)(oldval64 >> 32);
+
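+ // The mutex is free to take when the exclusive (E) bit is clear.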
+ gotlock = ((lgenval & PTH_RWL_EBIT) == 0);
+
+ if (trylock && !gotlock) {
+ // A trylock on a held lock will fail immediately. But since
+ // we did not load the sequence words atomically, perform a
+ // no-op CAS64 to ensure that nobody has unlocked concurrently.
+ } else {
+ // Increment the lock sequence number and force the lock into E+K
+ // mode, whether "gotlock" is true or not.
+ lgenval += PTHRW_INC;
+ lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
+ }
+
+ newval64 = (((uint64_t)ugenval) << 32);
+ newval64 |= lgenval;
+
+ // Publish the new sequence words with acquire ordering so the critical
+ // section cannot be reordered before the lock is taken.
+ } while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));
+
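+ // Three possible outcomes: the CAS took the lock (record our TID and
+ // return), a trylock found it already held (EBUSY), or we fall into the
+ // contended wait path until the owner releases the lock.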
+ if (os_fastpath(gotlock)) {
+ os_atomic_store(tidaddr, selfid, relaxed);
+ return 0;
+ } else if (trylock) {
+ return EBUSY;
+ } else {
+ return _pthread_mutex_lock_wait(omutex, newval64, oldtid);
+ }
+}
+
+PTHREAD_NOEXPORT_VARIANT