+ disable_preemption();
+ if (os_atomic_cmpxchg(&mutex->lck_mtx_state, prev, state, acquire))
+ break;
+ enable_preemption();
+ cpu_pause();
+ state = ordered_load_mtx_state(mutex);
+ }
+ *new_state = state;
+ return;
+}
+
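+/*
+ * Convenience wrappers around lck_mtx_interlock_lock_set_and_clear_flags():
+ * lck_mtx_interlock_lock_clear_flags() only clears bits while taking the
+ * interlock, lck_mtx_interlock_lock() takes the interlock without touching
+ * any other state bits. Both return with the interlock held and preemption
+ * disabled.
+ */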
+static inline void
+lck_mtx_interlock_lock_clear_flags(
+ lck_mtx_t *mutex,
+ uint32_t and_flags,
+ uint32_t *new_state)
+{
+ return lck_mtx_interlock_lock_set_and_clear_flags(mutex, 0, and_flags, new_state);
+}
+
+static inline void
+lck_mtx_interlock_lock(
+ lck_mtx_t *mutex,
+ uint32_t *new_state)
+{
+ return lck_mtx_interlock_lock_set_and_clear_flags(mutex, 0, 0, new_state);
+}
+
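+/*
+ * Single cmpxchg attempt to take the interlock and set or_flags.
+ * *new_state carries the caller's snapshot of lck_mtx_state on entry
+ * and is updated on success.
+ * Returns 1 with the interlock held and preemption disabled; returns 0
+ * (leaving preemption enabled) if the interlock or any of the requested
+ * flags is already set, or if the cmpxchg loses a race.
+ */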
+static inline int
+lck_mtx_interlock_try_lock_set_flags(
+ lck_mtx_t *mutex,
+ uint32_t or_flags,
+ uint32_t *new_state)
+{
+ uint32_t state, prev;
+ state = *new_state;
+
+ /* interlock or one of the requested flags already set: fail immediately */
+ if (state & (LCK_MTX_ILOCKED_MSK | or_flags)) {
+ return 0;
+ }
+ prev = state; /* prev contains snapshot for exchange */
+ state |= LCK_MTX_ILOCKED_MSK | or_flags; /* pick up interlock */
+ disable_preemption();
+ if (os_atomic_cmpxchg(&mutex->lck_mtx_state, prev, state, acquire)) {
+ *new_state = state;
+ return 1;
+ }
+
+ enable_preemption();
+ return 0;
+}
+
+static inline int
+lck_mtx_interlock_try_lock(
+ lck_mtx_t *mutex,
+ uint32_t *new_state)
+{
+ return lck_mtx_interlock_try_lock_set_flags(mutex, 0, new_state);
+}
+
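+/*
+ * Interrupt-disabling variant of the interlock try-lock: the previous
+ * interrupt state is saved in *istate.
+ * Returns 1 with interrupts disabled, preemption disabled and the
+ * interlock held; on failure the interrupt state is restored and 0
+ * is returned.
+ */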
+static inline int
+lck_mtx_interlock_try_lock_disable_interrupts(
+ lck_mtx_t *mutex,
+ boolean_t *istate)
+{
+ uint32_t state;
+
+ *istate = ml_set_interrupts_enabled(FALSE);
+ state = ordered_load_mtx_state(mutex);
+
+ if (lck_mtx_interlock_try_lock(mutex, &state)) {
+ return 1;
+ } else {
+ ml_set_interrupts_enabled(*istate);
+ return 0;
+ }
+}
+
+static inline void
+lck_mtx_interlock_unlock_enable_interrupts(
+ lck_mtx_t *mutex,
+ boolean_t istate)
+{
+ lck_mtx_ilk_unlock(mutex);
+ ml_set_interrupts_enabled(istate);
+}
+
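+/*
+ * Routine: lck_mtx_lock_contended
+ *
+ * Contended slow path for lck_mtx_lock: spins while the owner is on
+ * core (lck_mtx_lock_spinwait_x86) and, if the mutex is still held,
+ * waits on it until it can be acquired.
+ *
+ * Called with interlock not held; may sleep.
+ */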
+__attribute__((noinline))
+static void
+lck_mtx_lock_contended(
+ lck_mtx_t *lock,
+ boolean_t indirect,
+ boolean_t *first_miss)
+{
+ lck_mtx_spinwait_ret_type_t ret;
+ uint32_t state;
+ thread_t thread;
+ struct turnstile *ts = NULL;
+
+try_again:
+
+ if (indirect) {
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, first_miss);
+ }
+
+ ret = lck_mtx_lock_spinwait_x86(lock);
+ state = ordered_load_mtx_state(lock);
+ switch (ret) {
+ case LCK_MTX_SPINWAIT_NO_SPIN:
+ /*
+ * owner not on core, lck_mtx_lock_spinwait_x86 didn't even
+ * try to spin.
+ */
+ if (indirect) {
+ lck_grp_mtx_update_direct_wait((struct _lck_mtx_ext_*)lock);
+ }
+
+ /* fall through to the LCK_MTX_SPINWAIT_SPUN case */
+ case LCK_MTX_SPINWAIT_SPUN:
+ /*
+ * mutex not acquired but lck_mtx_lock_spinwait_x86 tried to spin
+ * interlock not held
+ */
+ lck_mtx_interlock_lock(lock, &state);
+ assert(state & LCK_MTX_ILOCKED_MSK);
+
+ if (state & LCK_MTX_MLOCKED_MSK) {
+ if (indirect) {
+ lck_grp_mtx_update_wait((struct _lck_mtx_ext_*)lock, first_miss);
+ }
+ lck_mtx_lock_wait_x86(lock, &ts);
+ /*
+ * interlock is not held here.
+ */
+ goto try_again;
+ } else {
+ /* grab the mutex */
+ state |= LCK_MTX_MLOCKED_MSK;
+ ordered_store_mtx_state_release(lock, state);
+ thread = current_thread();
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++;
+ }
+#endif /* MACH_LDEBUG */
+ }
+
+ break;
+ case LCK_MTX_SPINWAIT_ACQUIRED:
+ /*
+ * mutex has been acquired by lck_mtx_lock_spinwait_x86
+ * interlock is held and preemption disabled
+ * owner is set and mutex marked as locked
+ * statistics updated too
+ */
+ break;
+ default:
+ panic("lck_mtx_lock_spinwait_x86 returned %d for mutex %p\n", ret, lock);
+ }
+
+ /*
+ * interlock is already acquired here
+ */
+
+ /* mutex has been acquired */
+ thread = (thread_t)lock->lck_mtx_owner;
+ if (state & LCK_MTX_WAITERS_MSK) {
+ /*
+ * lck_mtx_lock_acquire_tail will call
+ * turnstile_complete.
+ */
+ return lck_mtx_lock_acquire_tail(lock, indirect, ts);
+ }
+
+ if (ts != NULL) {
+ turnstile_complete((uintptr_t)lock, NULL, NULL, TURNSTILE_KERNEL_MUTEX);
+ }
+
+ assert(current_thread()->turnstile != NULL);
+
+ /* release the interlock */
+ lck_mtx_lock_finish_inline_with_cleanup(lock, ordered_load_mtx_state(lock), indirect);
+}
+
+/*
+ * Noinline helper functions that call panic,
+ * kept out of line so the compiled fast paths stay small.
+ */
+
+__attribute__((noinline)) __abortlike
+static void
+lck_mtx_destroyed(
+ lck_mtx_t *lock)
+{
+ panic("trying to interlock destroyed mutex (%p)", lock);
+}
+
+__attribute__((noinline))
+static boolean_t
+lck_mtx_try_destroyed(
+ lck_mtx_t *lock)
+{
+ panic("trying to interlock destroyed mutex (%p)", lock);
+ return FALSE;
+}
+
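+/*
+ * Spin helper for the lock paths: waits for the interlock to clear.
+ * Returns TRUE with *new_state refreshed once both the interlock and
+ * the mutex bit are clear; returns FALSE as soon as the mutex bit is
+ * observed, in which case the caller takes the contended path.
+ */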
+__attribute__((always_inline))
+static boolean_t
+lck_mtx_lock_wait_interlock_to_clear(
+ lck_mtx_t *lock,
+ uint32_t *new_state)
+{
+ uint32_t state;
+
+ for ( ; ; ) {
+ cpu_pause();
+ state = ordered_load_mtx_state(lock);
+ if (!(state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK))) {
+ *new_state = state;
+ return TRUE;
+ }
+ if (state & LCK_MTX_MLOCKED_MSK) {
+ /* if it is held as mutex, just fail */
+ return FALSE;
+ }
+ }
+}
+
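+/*
+ * Try-lock counterpart of the helper above: fails (returns FALSE) as
+ * soon as the lock is seen held as a mutex or as a spinlock, since the
+ * try-lock paths never wait for an owner; otherwise spins until the
+ * interlock clears and returns TRUE with *new_state refreshed.
+ */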
+__attribute__((always_inline))
+static boolean_t
+lck_mtx_try_lock_wait_interlock_to_clear(
+ lck_mtx_t *lock,
+ uint32_t *new_state)
+{
+ uint32_t state;
+
+ for ( ; ; ) {
+ cpu_pause();
+ state = ordered_load_mtx_state(lock);
+ if (state & (LCK_MTX_MLOCKED_MSK | LCK_MTX_SPIN_MSK)) {
+ /* if it is held as mutex or spin, just fail */
+ return FALSE;
+ }
+ if (!(state & LCK_MTX_ILOCKED_MSK)) {
+ *new_state = state;
+ return TRUE;
+ }
+ }
+}
+
+/*
+ * Routine: lck_mtx_lock_slow
+ *
+ * Locks a mutex for current thread.
+ * If the lock is contended this function might
+ * sleep.
+ *
+ * Called with interlock not held.
+ */
+__attribute__((noinline))
+void
+lck_mtx_lock_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+ /* is only the mutex bit set (mutex held by another thread)? */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) {
+ /* interlock is clear, so it must have been the mutex: take the contended path */
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+
+ if (state & LCK_MTX_SPIN_MSK) {
+ /* LCK_MTX_SPIN_MSK was set, so LCK_MTX_ILOCKED_MSK must also be set */
+ assert(state & LCK_MTX_ILOCKED_MSK);
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ }
+
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_MLOCKED_MSK, &state))) {
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+ /*
+ * Check if there are waiters whose
+ * priority needs to be inherited.
+ */
+ if (__improbable(state & LCK_MTX_WAITERS_MSK)) {
+ return lck_mtx_lock_acquire_tail(lock, indirect, NULL);
+ }
+
+ /* release the interlock */
+ lck_mtx_lock_finish_inline(lock, ordered_load_mtx_state(lock), indirect);
+
+ return;
+}
+
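+/*
+ * Routine: lck_mtx_try_lock_slow
+ *
+ * Tries to lock a mutex for the current thread
+ * without ever blocking.
+ * Returns TRUE if the mutex was acquired, FALSE otherwise.
+ *
+ * Called with interlock not held.
+ */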
+__attribute__((noinline))
+boolean_t
+lck_mtx_try_lock_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+ /* is only the mutex bit set (mutex held by another thread)? */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) {
+ return FALSE;
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_try_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+ }
+
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect) {
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ return FALSE;
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_MLOCKED_MSK, &state))) {
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect) {
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ return FALSE;
+ }
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+ /*
+ * Check if there are waiters whose
+ * priority needs to be inherited.
+ */
+ if (__improbable(state & LCK_MTX_WAITERS_MSK)) {
+ return lck_mtx_try_lock_acquire_tail(lock);
+ }
+
+ /* release the interlock */
+ lck_mtx_try_lock_finish_inline(lock, ordered_load_mtx_state(lock));
+
+ return TRUE;
+}
+
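+/*
+ * Routine: lck_mtx_lock_spin_slow
+ *
+ * Locks a mutex in spin mode for the current thread.
+ * In the uncontended case it returns with the lock held as a
+ * spinlock (interlock held, preemption disabled); under contention
+ * it falls back to lck_mtx_lock_contended and the lock is acquired
+ * as a full mutex instead.
+ *
+ * Called with interlock not held.
+ */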
+__attribute__((noinline))
+void
+lck_mtx_lock_spin_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+ /* is only the mutex bit set (mutex held by another thread)? */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) {
+ /* interlock is clear, so it must have been the mutex: take the contended path */
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+
+ if (state & LCK_MTX_SPIN_MSK) {
+ /* LCK_MTX_SPIN_MSK was set, so LCK_MTX_ILOCKED_MSK must also be set */
+ assert(state & LCK_MTX_ILOCKED_MSK);
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ }
+
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_SPIN_MSK, &state))) {
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* lock as spinlock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+
+#if CONFIG_DTRACE
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0);
+#endif
+ /* return with the interlock held and preemption disabled */
+ return;
+}
+
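+/*
+ * Routine: lck_mtx_try_lock_spin_slow
+ *
+ * Tries to lock a mutex in spin mode for the current thread
+ * without ever blocking.
+ * Returns TRUE with the lock held as a spinlock (interlock held,
+ * preemption disabled), or FALSE if the lock is busy.
+ *
+ * Called with interlock not held.
+ */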
+__attribute__((noinline))
+boolean_t
+lck_mtx_try_lock_spin_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+ /* is only the mutex bit set (mutex held by another thread)? */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) {
+ return FALSE;
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_try_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+ }
+
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect) {
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ return FALSE;
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_SPIN_MSK, &state))) {
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect) {
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ return FALSE;
+ }
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+
+#if CONFIG_DTRACE
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0);
+#endif
+ return TRUE;
+}
+
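+/*
+ * Routine: lck_mtx_convert_spin
+ *
+ * Converts a mutex held in spin mode by the current thread
+ * into a normally held mutex.
+ * Returns immediately if the mutex is already held in mutex mode.
+ *
+ * Called with the mutex owned by the current thread.
+ */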
+__attribute__((noinline))
+void
+lck_mtx_convert_spin(
+ lck_mtx_t *lock)
+{
+ uint32_t state;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ /* If so, take indirection */
+ get_indirect_mutex(&lock, &state);
+ }
+
+ assertf((thread_t)lock->lck_mtx_owner == current_thread(),
+     "lock %p not owned by thread %p (current owner %p)",
+     lock, current_thread(), (thread_t)lock->lck_mtx_owner);
+
+ if (__improbable(state & LCK_MTX_MLOCKED_MSK)) {
+ /* already owned as a mutex, just return */
+ return;
+ }
+
+ assert(get_preemption_level() > 0);
+ assert(state & LCK_MTX_ILOCKED_MSK);
+ assert(state & LCK_MTX_SPIN_MSK);
+
+ /*
+ * Check if there are waiters whose
+ * priority needs to be inherited.
+ */
+ if (__improbable(state & LCK_MTX_WAITERS_MSK)) {
+ return lck_mtx_convert_spin_acquire_tail(lock);
+ }
+
+ lck_mtx_convert_spin_finish_inline(lock, ordered_load_mtx_state(lock));
+
+ return;
+}
+
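+/*
+ * Single-shot attempt to acquire the mutex: one cmpxchg that takes
+ * the interlock and sets LCK_MTX_MLOCKED_MSK, then records the owner.
+ * Returns TRUE with the interlock held and preemption disabled, or
+ * FALSE if the lock or interlock was busy.
+ */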
+static inline boolean_t
+lck_mtx_lock_grab_mutex(
+ lck_mtx_t *lock)
+{
+ uint32_t state;
+
+ state = ordered_load_mtx_state(lock);
+
+ if (!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_MLOCKED_MSK, &state)) {
+ return FALSE;
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+ return TRUE;
+}
+
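+/*
+ * Routine: lck_mtx_assert
+ *
+ * Panics if the ownership of the mutex does not match the
+ * asserted type (LCK_MTX_ASSERT_OWNED or LCK_MTX_ASSERT_NOTOWNED)
+ * for the current thread.
+ */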
+__attribute__((noinline))
+void
+lck_mtx_assert(
+ lck_mtx_t *lock,
+ unsigned int type)
+{
+ thread_t thread, owner;
+ uint32_t state;
+
+ thread = current_thread();
+ state = ordered_load_mtx_state(lock);
+
+ if (state == LCK_MTX_TAG_INDIRECT) {
+ get_indirect_mutex(&lock, &state);
+ }
+
+ owner = (thread_t)lock->lck_mtx_owner;
+
+ if (type == LCK_MTX_ASSERT_OWNED) {
+ if (owner != thread || !(state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK))) {
+ panic("mutex (%p) not owned\n", lock);
+ }
+ } else {
+ assert(type == LCK_MTX_ASSERT_NOTOWNED);
+ if (owner == thread) {
+ panic("mutex (%p) owned\n", lock);
+ }
+ }
+}
+
+/*
+ * Routine: lck_mtx_lock_spinwait_x86
+ *
+ * Invoked while trying to acquire a mutex when there is contention
+ * and the holder is running on another processor. We spin for up to
+ * a maximum time waiting for the lock to be released.
+ *
+ * Called with the interlock unlocked.
+ * returns LCK_MTX_SPINWAIT_ACQUIRED if mutex acquired
+ * returns LCK_MTX_SPINWAIT_SPUN if we spun
+ * returns LCK_MTX_SPINWAIT_NO_SPIN if we didn't spin due to the holder not running
+ */
+__attribute__((noinline))
+lck_mtx_spinwait_ret_type_t
+lck_mtx_lock_spinwait_x86(
+ lck_mtx_t *mutex)
+{
+ __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex);
+ thread_t holder;
+ uint64_t overall_deadline;
+ uint64_t check_owner_deadline;
+ uint64_t cur_time;
+ lck_mtx_spinwait_ret_type_t retval = LCK_MTX_SPINWAIT_SPUN;
+ int loopcount = 0;
+
+ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_START,
+ trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, 0, 0);
+
+ cur_time = mach_absolute_time();
+ overall_deadline = cur_time + MutexSpin;