+__attribute__((noinline))
+static void
+lck_mtx_lock_contended(
+ lck_mtx_t *lock,
+ boolean_t indirect,
+ boolean_t *first_miss)
+{
+ lck_mtx_spinwait_ret_type_t ret;
+ uint32_t state;
+ thread_t thread;
+ struct turnstile *ts = NULL;
+
+try_again:
+
+ if (indirect) {
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, first_miss);
+ }
+
+ ret = lck_mtx_lock_spinwait_x86(lock);
+ state = ordered_load_mtx_state(lock);
+ switch (ret) {
+ case LCK_MTX_SPINWAIT_NO_SPIN:
+ /*
+ * owner not on core, lck_mtx_lock_spinwait_x86 didn't even
+ * try to spin.
+ */
+ if (indirect) {
+ lck_grp_mtx_update_direct_wait((struct _lck_mtx_ext_*)lock);
+ }
+
+ /* just fall through to case LCK_MTX_SPINWAIT_SPUN */
+ case LCK_MTX_SPINWAIT_SPUN:
+ /*
+ * mutex not acquired, but lck_mtx_lock_spinwait_x86 did try to spin;
+ * interlock not held
+ */
+ lck_mtx_interlock_lock(lock, &state);
+ assert(state & LCK_MTX_ILOCKED_MSK);
+
+ if (state & LCK_MTX_MLOCKED_MSK) {
+ if (indirect) {
+ lck_grp_mtx_update_wait((struct _lck_mtx_ext_*)lock, first_miss);
+ }
+ lck_mtx_lock_wait_x86(lock, &ts);
+ /*
+ * interlock is not held here.
+ */
+ goto try_again;
+ } else {
+
+ /* grab the mutex */
+ state |= LCK_MTX_MLOCKED_MSK;
+ ordered_store_mtx_state_release(lock, state);
+ thread = current_thread();
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++;
+ }
+#endif /* MACH_LDEBUG */
+ }
+
+ break;
+ case LCK_MTX_SPINWAIT_ACQUIRED:
+ /*
+ * mutex has been acquired by lck_mtx_lock_spinwait_x86
+ * interlock is held and preemption disabled
+ * owner is set and mutex marked as locked
+ * statistics updated too
+ */
+ break;
+ default:
+ panic("lck_mtx_lock_spinwait_x86 returned %d for mutex %p\n", ret, lock);
+ }
+
+ /*
+ * interlock is already acquired here
+ */
+
+ /* mutex has been acquired */
+ thread = (thread_t)lock->lck_mtx_owner;
+ if (state & LCK_MTX_WAITERS_MSK) {
+ /*
+ * lck_mtx_lock_acquire_tail will call
+ * turnstile_complete.
+ */
+ return lck_mtx_lock_acquire_tail(lock, indirect, ts);
+ }
+
+ if (ts != NULL) {
+ turnstile_complete((uintptr_t)lock, NULL, NULL, TURNSTILE_KERNEL_MUTEX);
+ }
+
+ assert(current_thread()->turnstile != NULL);
+
+ /* release the interlock */
+ lck_mtx_lock_finish_inline_with_cleanup(lock, ordered_load_mtx_state(lock), indirect);
+}
+
+/*
+ * Helper noinline functions that call panic, kept out of line so the
+ * compiler can keep the lock fast paths compact.
+ */
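+/*
+ * (lck_mtx_destroyed is additionally marked __abortlike, which is intended
+ * to tell the compiler that the call does not return, keeping the cold panic
+ * path entirely out of the callers' hot code.)
+ */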
+
+__attribute__((noinline)) __abortlike
+static void
+lck_mtx_destroyed(
+ lck_mtx_t *lock)
+{
+ panic("trying to interlock destroyed mutex (%p)", lock);
+}
+
+__attribute__((noinline))
+static boolean_t
+lck_mtx_try_destroyed(
+ lck_mtx_t *lock)
+{
+ panic("trying to interlock destroyed mutex (%p)", lock);
+ return FALSE;
+}
+
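+/*
+ * Spin until a transient interlock holder releases the interlock.
+ * Returns TRUE, with the freshly observed state in *new_state, once both the
+ * interlock and mutex bits are clear; returns FALSE as soon as the lock is
+ * observed held as a full mutex, so the caller can take the contended path.
+ */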
+__attribute__((always_inline))
+static boolean_t
+lck_mtx_lock_wait_interlock_to_clear(
+ lck_mtx_t *lock,
+ uint32_t* new_state)
+{
+ uint32_t state;
+
+ for ( ; ; ) {
+ cpu_pause();
+ state = ordered_load_mtx_state(lock);
+ if (!(state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK))) {
+ *new_state = state;
+ return TRUE;
+ }
+ if (state & LCK_MTX_MLOCKED_MSK) {
+ /* if it is held as mutex, just fail */
+ return FALSE;
+ }
+ }
+}
+
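+/*
+ * Try-lock variant of the helper above: fails immediately if the lock is
+ * observed held as a mutex or as a spin lock, and only waits out a transient
+ * interlock holder.
+ */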
+__attribute__((always_inline))
+static boolean_t
+lck_mtx_try_lock_wait_interlock_to_clear(
+ lck_mtx_t *lock,
+ uint32_t* new_state)
+{
+ uint32_t state;
+
+ for ( ; ; ) {
+ cpu_pause();
+ state = ordered_load_mtx_state(lock);
+ if (state & (LCK_MTX_MLOCKED_MSK | LCK_MTX_SPIN_MSK)) {
+ /* if it is held as mutex or spin, just fail */
+ return FALSE;
+ }
+ if (!(state & LCK_MTX_ILOCKED_MSK)) {
+ *new_state = state;
+ return TRUE;
+ }
+ }
+}
+
+/*
+ * Routine: lck_mtx_lock_slow
+ *
+ * Locks a mutex for current thread.
+ * If the lock is contended this function might
+ * sleep.
+ *
+ * Called with interlock not held.
+ */
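+/*
+ * Hypothetical caller-side sketch (not part of this change), assuming the
+ * usual split where the exported lck_mtx_lock() fast path attempts a single
+ * atomic acquire and falls back to this routine whenever the state word has
+ * the interlock or mutex bit set:
+ *
+ *   lck_mtx_lock(&obj->lck);   // may end up in lck_mtx_lock_slow()
+ *   ... critical section, may block ...
+ *   lck_mtx_unlock(&obj->lck);
+ *
+ * (obj->lck is an illustrative name, not a real field.)
+ */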
+__attribute__((noinline))
+void
+lck_mtx_lock_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+
+ /* is the mutex already held and not indirect */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){
+ /* no, must have been the mutex */
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+
+ if (state & LCK_MTX_SPIN_MSK) {
+ /* LCK_MTX_SPIN_MSK was set, so LCK_MTX_ILOCKED_MSK must also be present */
+ assert(state & LCK_MTX_ILOCKED_MSK);
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ }
+
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_MLOCKED_MSK, &state))) {
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+ /*
+ * Check if there are waiters whose
+ * priority we need to inherit.
+ */
+ if (__improbable(state & LCK_MTX_WAITERS_MSK)) {
+ return lck_mtx_lock_acquire_tail(lock, indirect, NULL);
+ }
+
+ /* release the interlock */
+ lck_mtx_lock_finish_inline(lock, ordered_load_mtx_state(lock), indirect);
+
+ return;
+}
+
+__attribute__((noinline))
+boolean_t
+lck_mtx_try_lock_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+ /* is the mutex already held and not indirect */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){
+ return FALSE;
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_try_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+ }
+
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect)
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ return FALSE;
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_MLOCKED_MSK, &state))) {
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect)
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ return FALSE;
+ }
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+ /*
+ * Check if there are waiters whose
+ * priority we need to inherit.
+ */
+ if (__improbable(state & LCK_MTX_WAITERS_MSK)) {
+ return lck_mtx_try_lock_acquire_tail(lock);
+ }
+
+ /* release the interlock */
+ lck_mtx_try_lock_finish_inline(lock, ordered_load_mtx_state(lock));
+
+ return TRUE;
+
+}
+
+__attribute__((noinline))
+void
+lck_mtx_lock_spin_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+
+ /* is the mutex already held and not indirect */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){
+ /* no, must have been the mutex */
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+
+ if (state & LCK_MTX_SPIN_MSK) {
+ /* LCK_MTX_SPIN_MSK was set, so LCK_MTX_ILOCKED_MSK must also be present */
+ assert(state & LCK_MTX_ILOCKED_MSK);
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ }
+ }
+
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_SPIN_MSK, &state) )) {
+ if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) {
+ return lck_mtx_lock_contended(lock, indirect, &first_miss);
+ }
+ }
+
+ /* lock as spinlock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+
+#if CONFIG_DTRACE
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0);
+#endif
+ /* return with the interlock held and preemption disabled */
+ return;
+}
+
+__attribute__((noinline))
+boolean_t
+lck_mtx_try_lock_spin_slow(
+ lck_mtx_t *lock)
+{
+ boolean_t indirect = FALSE;
+ uint32_t state;
+ int first_miss = 0;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* is the interlock or mutex held */
+ if (__improbable(state & ((LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))) {
+ /*
+ * Note: both LCK_MTX_TAG_DESTROYED and LCK_MTX_TAG_INDIRECT
+ * have LCK_MTX_ILOCKED_MSK and LCK_MTX_MLOCKED_MSK
+ * set in state (state == lck_mtx_tag)
+ */
+
+ /* is the mutex already held and not indirect */
+ if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){
+ return FALSE;
+ }
+
+ /* check to see if it is marked destroyed */
+ if (__improbable(state == LCK_MTX_TAG_DESTROYED)) {
+ lck_mtx_try_destroyed(lock);
+ }
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ indirect = get_indirect_mutex(&lock, &state);
+
+ first_miss = 0;
+ lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock);
+ }
+
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect)
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ return FALSE;
+ }
+ }
+
+ /* no - can't be INDIRECT, DESTROYED or locked */
+ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_SPIN_MSK, &state))) {
+ if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) {
+ if (indirect)
+ lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss);
+ return FALSE;
+ }
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+
+#if CONFIG_DTRACE
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0);
+#endif
+ return TRUE;
+
+}
+
+__attribute__((noinline))
+void
+lck_mtx_convert_spin(
+ lck_mtx_t *lock)
+{
+ uint32_t state;
+
+ state = ordered_load_mtx_state(lock);
+
+ /* Is this an indirect mutex? */
+ if (__improbable(state == LCK_MTX_TAG_INDIRECT)) {
+ /* If so, take indirection */
+ get_indirect_mutex(&lock, &state);
+ }
+
+ assertf((thread_t)lock->lck_mtx_owner == current_thread(), "lock %p not owned by thread %p (current owner %p)", lock, current_thread(), (thread_t)lock->lck_mtx_owner );
+
+ if (__improbable(state & LCK_MTX_MLOCKED_MSK)) {
+ /* already owned as a mutex, just return */
+ return;
+ }
+
+ assert(get_preemption_level() > 0);
+ assert(state & LCK_MTX_ILOCKED_MSK);
+ assert(state & LCK_MTX_SPIN_MSK);
+
+ /*
+ * Check if there are waiters whose
+ * priority we need to inherit.
+ */
+ if (__improbable(state & LCK_MTX_WAITERS_MSK)) {
+ return lck_mtx_convert_spin_acquire_tail(lock);
+ }
+
+ lck_mtx_convert_spin_finish_inline(lock, ordered_load_mtx_state(lock));
+
+ return;
+}
+
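+/*
+ * Single attempt to take the interlock and mutex together: on success the
+ * owner is recorded and the routine returns TRUE with the interlock held and
+ * preemption disabled; on failure it returns FALSE without touching the lock.
+ */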
+static inline boolean_t
+lck_mtx_lock_grab_mutex(
+ lck_mtx_t *lock)
+{
+ uint32_t state;
+
+ state = ordered_load_mtx_state(lock);
+
+ if (!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_MLOCKED_MSK, &state)) {
+ return FALSE;
+ }
+
+ /* lock and interlock acquired */
+
+ thread_t thread = current_thread();
+ /* record owner of mutex */
+ ordered_store_mtx_owner(lock, (uintptr_t)thread);
+
+#if MACH_LDEBUG
+ if (thread) {
+ thread->mutex_count++; /* lock statistic */
+ }
+#endif
+ return TRUE;
+}
+
+__attribute__((noinline))
+void
+lck_mtx_assert(
+ lck_mtx_t *lock,
+ unsigned int type)
+{
+ thread_t thread, owner;
+ uint32_t state;
+
+ thread = current_thread();
+ state = ordered_load_mtx_state(lock);
+
+ if (state == LCK_MTX_TAG_INDIRECT) {
+ get_indirect_mutex(&lock, &state);
+ }
+
+ owner = (thread_t)lock->lck_mtx_owner;
+
+ if (type == LCK_MTX_ASSERT_OWNED) {
+ if (owner != thread || !(state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK)))
+ panic("mutex (%p) not owned\n", lock);
+ } else {
+ assert (type == LCK_MTX_ASSERT_NOTOWNED);
+ if (owner == thread)
+ panic("mutex (%p) owned\n", lock);
+ }
+}
+
+/*
+ * Routine: lck_mtx_lock_spinwait_x86
+ *
+ * Invoked trying to acquire a mutex when there is contention but
+ * the holder is running on another processor. We spin for up to a maximum
+ * time waiting for the lock to be released.
+ *
+ * Called with the interlock unlocked.
+ * returns LCK_MTX_SPINWAIT_ACQUIRED if mutex acquired
+ * returns LCK_MTX_SPINWAIT_SPUN if we spun
+ * returns LCK_MTX_SPINWAIT_NO_SPIN if we didn't spin due to the holder not running
+ */
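+/*
+ * Spin-budget sketch (illustrative): the loop below polls the lock on every
+ * iteration, but only re-examines the owner, which requires taking the
+ * interlock, at most once every MutexSpin/4 within an overall budget of
+ * MutexSpin absolute-time units:
+ *
+ *   overall_deadline     = start_time + MutexSpin;
+ *   check_owner_deadline = cur_time + (MutexSpin / 4);   // rolling
+ *
+ * (start_time here is just a label for the first mach_absolute_time() read.)
+ */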
+__attribute__((noinline))
+lck_mtx_spinwait_ret_type_t
+lck_mtx_lock_spinwait_x86(
+ lck_mtx_t *mutex)
+{
+ __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex);
+ thread_t holder;
+ uint64_t overall_deadline;
+ uint64_t check_owner_deadline;
+ uint64_t cur_time;
+ lck_mtx_spinwait_ret_type_t retval = LCK_MTX_SPINWAIT_SPUN;
+ int loopcount = 0;
+
+ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_START,
+ trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, 0, 0);
+
+ cur_time = mach_absolute_time();
+ overall_deadline = cur_time + MutexSpin;
+ check_owner_deadline = cur_time;
+
+ /*
+ * Spin while:
+ * - mutex is locked, and
+ * - it's locked as a spin lock, and
+ * - owner is running on another processor, and
+ * - owner (processor) is not idling, and
+ * - we haven't spun for long enough.
+ */
+ do {
+ if (__probable(lck_mtx_lock_grab_mutex(mutex))) {
+ retval = LCK_MTX_SPINWAIT_ACQUIRED;
+ break;
+ }
+ cur_time = mach_absolute_time();
+
+ if (cur_time >= overall_deadline)
+ break;
+
+ if (cur_time >= check_owner_deadline && mutex->lck_mtx_owner) {
+ boolean_t istate;
+
+ /*
+ * While spinning we repeatedly peek at the state of the lock,
+ * which requires taking the interlock. The thread that will
+ * unlock the mutex also needs the interlock, and we do not want
+ * to slow it down. To avoid taking an interrupt while holding
+ * the interlock (which would stretch out the time we hold it),
+ * we try to acquire the interlock with interrupts disabled.
+ * This is safe because it is a "try_lock": if the interlock
+ * cannot be acquired we re-enable interrupts and fail, so it is
+ * ok to call it even if the interlock was already held.
+ */
+ if (lck_mtx_interlock_try_lock_disable_interrupts(mutex, &istate)) {
+
+ if ((holder = (thread_t) mutex->lck_mtx_owner) != NULL) {
+
+ if ( !(holder->machine.specFlags & OnProc) ||
+ (holder->state & TH_IDLE)) {
+
+ lck_mtx_interlock_unlock_enable_interrupts(mutex, istate);
+
+ if (loopcount == 0)
+ retval = LCK_MTX_SPINWAIT_NO_SPIN;
+ break;
+ }
+ }
+ lck_mtx_interlock_unlock_enable_interrupts(mutex, istate);
+
+ check_owner_deadline = cur_time + (MutexSpin / 4);
+ }
+ }
+ cpu_pause();
+
+ loopcount++;
+
+ } while (TRUE);
+
+#if CONFIG_DTRACE
+ /*
+ * overall_deadline already encodes when we started spinning
+ * (it was set to start time + MutexSpin), so if dtrace is active
+ * we compute backwards from it to determine how long we spun.
+ *
+ * Note that we record a different probe id depending on whether
+ * this is a direct or indirect mutex. This allows us to
+ * penalize only lock groups that have debug/stats enabled
+ * with dtrace processing, if desired.
+ */
+ if (__probable(mutex->lck_mtx_is_ext == 0)) {
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, mutex,
+ mach_absolute_time() - (overall_deadline - MutexSpin));
+ } else {
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, mutex,
+ mach_absolute_time() - (overall_deadline - MutexSpin));
+ }
+ /* The lockstat acquire event is recorded by the assembly code beneath us. */
+#endif
+
+ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_END,
+ trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, retval, 0);
+
+ return retval;
+}
+
+
+
+/*
+ * Routine: lck_mtx_lock_wait_x86
+ *
+ * Invoked in order to wait on contention.
+ *
+ * Called with the interlock locked and
+ * preemption disabled...
+ * returns it unlocked and with preemption enabled
+ *
+ * lck_mtx_waiters is 1:1 with a wakeup needing to occur.
+ * A runnable waiter can exist between wait and acquire
+ * without a waiters count being set.
+ * This allows us to never make a spurious wakeup call.
+ *
+ * Priority:
+ * This avoids taking the thread lock if the owning thread is at the same priority,
+ * which optimizes the case of same-priority threads contending on a lock.
+ * However, it also allows the owning thread to drop in priority while holding the lock,
+ * because there is no state the priority change can inspect which says that the
+ * targeted thread holds a contended mutex.
+ *
+ * One possible solution: priority changes could look for some atomic tag
+ * on the thread saying 'holding contended lock', and then set up a promotion.
+ * This needs a story for dropping that promotion: the last contended unlock
+ * has to notice that this has happened.
+ */
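+/*
+ * Turnstile pairing sketch (this mirrors what lck_mtx_lock_contended above
+ * already does; shown only as an illustration):
+ *
+ *   struct turnstile *ts = NULL;
+ *   while (the mutex is still owned) {
+ *       // interlock held here
+ *       lck_mtx_lock_wait_x86(lock, &ts); // prepares ts on first use, blocks,
+ *                                         // returns with the interlock dropped
+ *       // re-take the interlock and re-check the state
+ *   }
+ *   // once acquired, lck_mtx_lock_acquire_tail() or an explicit
+ *   // turnstile_complete() ends the turnstile use
+ */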
+__attribute__((noinline))
+void
+lck_mtx_lock_wait_x86 (
+ lck_mtx_t *mutex,
+ struct turnstile **ts)
+{
+ thread_t self = current_thread();
+
+#if CONFIG_DTRACE
+ uint64_t sleep_start = 0;
+
+ if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
+ sleep_start = mach_absolute_time();
+ }
+#endif
+ __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex);
+
+ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START,
+ trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner),
+ mutex->lck_mtx_waiters, 0, 0);
+
+ assert(self->waiting_for_mutex == NULL);
+ self->waiting_for_mutex = mutex;
+ mutex->lck_mtx_waiters++;
+
+ thread_t holder = (thread_t)mutex->lck_mtx_owner;
+ assert(holder != NULL);
+
+ /*
+ * lck_mtx_lock_wait_x86 might be called in a loop. Call turnstile_prepare
+ * just once and reuse the same turnstile while looping; the matching
+ * turnstile_complete will be called by lck_mtx_lock_contended once the
+ * lock is finally acquired.
+ */
+ if (*ts == NULL) {
+ *ts = turnstile_prepare((uintptr_t)mutex, NULL, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
+ }
+
+ struct turnstile *turnstile = *ts;
+ thread_set_pending_block_hint(self, kThreadWaitKernelMutex);
+ turnstile_update_inheritor(turnstile, holder, (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
+
+ waitq_assert_wait64(&turnstile->ts_waitq, CAST_EVENT64_T(LCK_MTX_EVENT(mutex)), THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
+
+ lck_mtx_ilk_unlock(mutex);
+
+ turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
+
+ thread_block(THREAD_CONTINUE_NULL);
+
+ self->waiting_for_mutex = NULL;
+
+ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END,
+ trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner),
+ mutex->lck_mtx_waiters, 0, 0);
+
+#if CONFIG_DTRACE
+ /*
+ * Record the Dtrace lockstat probe for blocking, block time
+ * measured from when we were entered.
+ */
+ if (sleep_start) {
+ if (mutex->lck_mtx_is_ext == 0) {
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, mutex,
+ mach_absolute_time() - sleep_start);
+ } else {
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, mutex,
+ mach_absolute_time() - sleep_start);
+ }
+ }
+#endif
+}
+
+/*
+ * Routine: kdp_lck_mtx_lock_spin_is_acquired
+ * NOT SAFE: To be used only by kernel debugger to avoid deadlock.
+ * Returns: TRUE if lock is acquired.
+ */
+boolean_t
+kdp_lck_mtx_lock_spin_is_acquired(lck_mtx_t *lck)
+{
+ if (not_in_kdp) {
+ panic("panic: kdp_lck_mtx_lock_spin_is_acquired called outside of kernel debugger");
+ }
+
+ if (lck->lck_mtx_ilocked || lck->lck_mtx_mlocked) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void
+kdp_lck_mtx_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo)
+{
+ lck_mtx_t * mutex = LCK_EVENT_TO_MUTEX(event);
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
+ thread_t holder = (thread_t)mutex->lck_mtx_owner;
+ waitinfo->owner = thread_tid(holder);
+}
+
+void
+kdp_rwlck_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo)
+{
+ lck_rw_t *rwlck = NULL;
+ switch(waitinfo->wait_type) {
+ case kThreadWaitKernelRWLockRead:
+ rwlck = READ_EVENT_TO_RWLOCK(event);
+ break;
+ case kThreadWaitKernelRWLockWrite:
+ case kThreadWaitKernelRWLockUpgrade:
+ rwlck = WRITE_EVENT_TO_RWLOCK(event);
+ break;
+ default:
+ panic("%s was called with an invalid blocking type", __FUNCTION__);
+ break;
+ }
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(rwlck);
+ waitinfo->owner = 0;
+}