+ thread_t holder;
+ uint64_t deadline;
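+ /*
+ * retval encodes the spin outcome:
+ * 0 - we acquired the mutex while spinning
+ * 1 - we spun until the deadline without acquiring it
+ * 2 - the owner was off-core or idle before we ever spun
+ */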
+ int retval = 1;
+ int loopcount = 0;
+
+ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_START,
+ mutex, mutex->lck_mtx_owner, mutex->lck_mtx_waiters, 0, 0);
+
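+ /* Spin for at most MutexSpin units of mach absolute time. */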
+ deadline = mach_absolute_time() + MutexSpin;
+
+ /*
+ * Spin while:
+ * - mutex is locked, and
+ * - it's locked as a spin lock, and
+ * - owner is running on another processor, and
+ * - owner (processor) is not idling, and
+ * - we haven't spun for long enough.
+ */
+ do {
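+ /* First, try to take the mutex outright; success ends the spin. */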
+ if (__probable(lck_mtx_lock_grab_mutex(mutex))) {
+ retval = 0;
+ break;
+ }
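+ /*
+ * If the owner is not running on a processor, or its processor
+ * is idling, it cannot release the lock soon; stop spinning.
+ */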
+ if ((holder = (thread_t) mutex->lck_mtx_owner) != NULL) {
+ if (!(holder->machine.specFlags & OnProc) ||
+ (holder->state & TH_IDLE)) {
+ if (loopcount == 0)
+ retval = 2;
+ break;
+ }
+ }
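+ /* cpu_pause() issues a spin-loop hint (PAUSE on x86) to reduce contention. */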
+ cpu_pause();
+
+ loopcount++;
+
+ } while (mach_absolute_time() < deadline);
+
+#if CONFIG_DTRACE
+ /*
+ * The spin start time is implicit in deadline (deadline was set to
+ * start + MutexSpin), so if dtrace is active we can compute how long
+ * we spun by working backwards from it.
+ *
+ * Note that we record a different probe id depending on whether
+ * this is a direct or indirect mutex. This allows us to
+ * penalize only lock groups that have debug/stats enabled
+ * with dtrace processing if desired.
+ */
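+ /* Indirect (extended) mutexes carry the per-group debug/stats state. */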
+ if (__probable(mutex->lck_mtx_is_ext == 0)) {
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, mutex,
+ mach_absolute_time() - (deadline - MutexSpin));
+ } else {
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, mutex,
+ mach_absolute_time() - (deadline - MutexSpin));
+ }
+ /* The lockstat acquire event is recorded by the assembly code beneath us. */
+#endif
+
+ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_END,
+ mutex, mutex->lck_mtx_owner, mutex->lck_mtx_waiters, retval, 0);
+
+ return retval;