+ thread_t holder;
+ volatile lck_mtx_t *mutex;
+ uint64_t deadline;
+
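+ /*
+ * An indirect mutex keeps its state in an external lck_mtx_ext_t;
+ * resolve to the embedded lock so the spin loop below reads the
+ * real lock word.
+ */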
+ if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
+ mutex = lck;
+ else
+ mutex = &lck->lck_mtx_ptr->lck_mtx;
+
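+ /* Trace the spin attempt and the current owner for kdebug. */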
+ KERNEL_DEBUG(
+ MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN) | DBG_FUNC_NONE,
+ (int)lck, (int)mutex->lck_mtx_locked, 0, 0, 0);
+
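+ /* Bound the spin to MutexSpin units of mach absolute time. */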
+ deadline = mach_absolute_time() + MutexSpin;
+ /*
+ * Spin while the mutex is locked, and either:
+ * - it's locked as a spin lock, or
+ * - the owner is running on another processor,
+ *   the owning thread is not idling, and
+ *   we haven't spun for long enough.
+ */
+ while ((holder = (thread_t) mutex->lck_mtx_locked) != NULL) {
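+ /*
+ * lck_mtx_locked holds the owning thread, or the
+ * MUTEX_LOCKED_AS_SPIN sentinel when the mutex is held as a
+ * spin lock; in the latter case we keep spinning without
+ * consulting the deadline, since the holder is expected to
+ * release quickly.
+ */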
+ if ((holder == (thread_t)MUTEX_LOCKED_AS_SPIN) ||
+ ((holder->machine.specFlags & OnProc) != 0 &&
+ (holder->state & TH_IDLE) == 0 &&
+ mach_absolute_time() < deadline)) {
+ cpu_pause();
+ continue;
+ }
+ break;
+ }
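+ /*
+ * Done spinning: either the mutex was released or the spin
+ * timed out and the caller falls back to blocking.
+ */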
+#if CONFIG_DTRACE
+ /*
+ * deadline was set to start-time + MutexSpin, so computing
+ * backwards from it yields how long we actually spun.
+ *
+ * Note that we record a different probe id depending on whether
+ * this is a direct or indirect mutex. This allows the dtrace
+ * processing penalty to be confined to lock groups that have
+ * debug/stats enabled, if desired.
+ */
+ if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, lck,
+ mach_absolute_time() - (deadline - MutexSpin));
+ } else {
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, lck,
+ mach_absolute_time() - (deadline - MutexSpin));
+ }
+ /* The lockstat acquire event is recorded by the assembly code beneath us. */
+#endif