diff --git a/osfmk/i386/locks_i386.c b/osfmk/i386/locks_i386.c
index 4dd253e01dea0f8226683c58b155f83bef11fb97..9b4639c24da971cdb307344ed94604be70f4dba9 100644
--- a/osfmk/i386/locks_i386.c
+++ b/osfmk/i386/locks_i386.c
@@ -126,6 +126,15 @@ decl_simple_lock_data(extern , panic_lock)
 #endif /* USLOCK_DEBUG */
 
 extern unsigned int not_in_kdp;
+extern void kdp_lck_mtx_find_owner(
+       struct waitq *          waitq,
+       event64_t               event,
+       thread_waitinfo_t *     waitinfo);
+
+extern void kdp_rwlck_find_owner(
+       struct waitq *          waitq,
+       event64_t               event,
+       thread_waitinfo_t *     waitinfo);
 
 /*
  *     We often want to know the addresses of the callers
@@ -287,16 +296,56 @@ boolean_t
 lck_spin_try_lock(
        lck_spin_t      *lck)
 {
-       return((boolean_t)usimple_lock_try((usimple_lock_t) lck));
+       boolean_t lrval = (boolean_t)usimple_lock_try((usimple_lock_t) lck);
+#if    DEVELOPMENT || DEBUG
+       if (lrval) {
+               pltrace(FALSE);
+       }
+#endif
+       return(lrval);
 }
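
On DEVELOPMENT and DEBUG kernels, pltrace(FALSE) appears to mark the start of a
preemption-disabled window for latency tracing (the unlock path further down adds
the matching pltrace(TRUE)), so it fires only when the try actually succeeds. A
hedged caller sketch, with hypothetical names; the failure case remains the
caller's problem:

    /* Hedged sketch; my_lock and counter are hypothetical. */
    static boolean_t
    try_bump_counter(lck_spin_t *my_lock, uint64_t *counter)
    {
            if (!lck_spin_try_lock(my_lock))
                    return FALSE;           /* busy: caller backs off or retries */
            (*counter)++;                   /* preemption disabled while held */
            lck_spin_unlock(my_lock);
            return TRUE;
    }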
 
 /*
- *      Routine: lck_spin_is_acquired
+ *     Routine:        lck_spin_assert
+ */
+void
+lck_spin_assert(lck_spin_t *lock, unsigned int type)
+{
+       thread_t thread, holder;
+       uintptr_t state;
+
+       if (__improbable(type != LCK_ASSERT_OWNED && type != LCK_ASSERT_NOTOWNED)) {
+               panic("lck_spin_assert(): invalid arg (%u)", type);
+       }
+
+       state = lock->interlock;
+       holder = (thread_t)state;
+       thread = current_thread();
+       if (type == LCK_ASSERT_OWNED) {
+               if (__improbable(holder == THREAD_NULL)) {
+                       panic("Lock not owned %p = %lx", lock, state);
+               }
+               if (__improbable(holder != thread)) {
+                       panic("Lock not owned by current thread %p = %lx", lock, state);
+               }
+       } else if (type == LCK_ASSERT_NOTOWNED) {
+               if (__improbable(holder != THREAD_NULL)) {
+                       if (holder == thread) {
+                               panic("Lock owned by current thread %p = %lx", lock, state);
+                       } else {
+                               panic("Lock %p owned by thread %p", lock, holder);
+                       }
+               }
+       }
+}
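
lck_spin_assert() works because on x86 the spinlock interlock word holds the
owning thread pointer while the lock is taken, as the cast above shows, so
ownership checks reduce to comparing that word against current_thread(). A
hedged usage sketch, function and lock names hypothetical:

    /* Hedged sketch: guard a helper that must be entered with the lock held. */
    static void
    queue_flush_locked(lck_spin_t *queue_lock)
    {
            lck_spin_assert(queue_lock, LCK_ASSERT_OWNED);  /* panics if we do not hold it */
            /* ... mutate state protected by queue_lock ... */
    }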
+
+/*
+ *      Routine: kdp_lck_spin_is_acquired
  *      NOT SAFE: To be used only by kernel debugger to avoid deadlock.
  *      Returns: TRUE if lock is acquired.
  */
 boolean_t
-lck_spin_is_acquired(lck_spin_t *lck) {
+kdp_lck_spin_is_acquired(lck_spin_t *lck) {
        if (not_in_kdp) {
                panic("panic: spinlock acquired check done outside of kernel debugger");
        }
@@ -378,6 +427,10 @@ usimple_lock(
                        panic("Spinlock acquisition timed out: lock=%p, lock owner thread=0x%lx, current_thread: %p, lock owner active on CPU 0x%x, current owner: 0x%lx", l, lowner,  current_thread(), lock_cpu, (uintptr_t)l->interlock.lock_data);
                }
        }
+#if DEVELOPMENT || DEBUG
+               pltrace(FALSE);
+#endif
+
        USLDBG(usld_lock_post(l, pc));
 #else
        simple_lock((simple_lock_t)l);
@@ -401,6 +454,9 @@ usimple_unlock(
 
        OBTAIN_PC(pc);
        USLDBG(usld_unlock(l, pc));
+#if DEVELOPMENT || DEBUG
+               pltrace(TRUE);
+#endif
        hw_lock_unlock(&l->interlock);
 #else
        simple_unlock_rwmb((simple_lock_t)l);
@@ -431,7 +487,10 @@ usimple_lock_try(
        OBTAIN_PC(pc);
        USLDBG(usld_lock_try_pre(l, pc));
        if ((success = hw_lock_try(&l->interlock))) {
-               USLDBG(usld_lock_try_post(l, pc));
+#if DEVELOPMENT || DEBUG
+               pltrace(FALSE);
+#endif
+               USLDBG(usld_lock_try_post(l, pc));
        }
        return success;
 #else
@@ -439,6 +498,22 @@ usimple_lock_try(
 #endif
 }
 
+/*
+ * Acquire a usimple_lock while polling for pending TLB flushes
+ * and spinning on a lock.
+ *
+ */
+void
+usimple_lock_try_lock_loop(usimple_lock_t l)
+{
+       boolean_t istate = ml_get_interrupts_enabled();
+       while (!simple_lock_try((l))) {
+               if (!istate)
+                       handle_pending_TLB_flushes();
+               cpu_pause();
+       }
+}
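
The polling matters when the caller spins with interrupts masked: a CPU that
cannot take the TLB shootdown interrupt has to drain pending invalidations
itself, otherwise the CPU that initiated the flush can wait on it indefinitely.
Note the helper samples the interrupt state once, so it assumes that state does
not change while it spins. A hedged sketch of the intended call pattern, lock
name hypothetical:

    /* Hedged sketch: spin for a lock with interrupts masked. */
    static void
    update_with_interrupts_masked(usimple_lock_t map_lock)
    {
            boolean_t istate = ml_set_interrupts_enabled(FALSE);

            usimple_lock_try_lock_loop(map_lock);   /* drains TLB flushes while spinning */
            /* ... critical section ... */
            usimple_unlock(map_lock);

            ml_set_interrupts_enabled(istate);
    }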
+
 #if    USLOCK_DEBUG
 /*
  *     States of a usimple_lock.  The default when initializing
@@ -548,7 +623,7 @@ usld_lock_post(
        usimple_lock_t  l,
        pc_t            pc)
 {
-       register int    mycpu;
+       int     mycpu;
        char    caller[] = "successful usimple_lock";
 
 
@@ -585,7 +660,7 @@ usld_unlock(
        usimple_lock_t  l,
        pc_t            pc)
 {
-       register int    mycpu;
+       int     mycpu;
        char    caller[] = "usimple_unlock";
 
 
@@ -650,7 +725,7 @@ usld_lock_try_post(
        usimple_lock_t  l,
        pc_t            pc)
 {
-       register int    mycpu;
+       int     mycpu;
        char    caller[] = "successful usimple_lock_try";
 
        if (!usld_lock_common_checks(l, caller))
@@ -782,12 +857,6 @@ lck_rw_destroy(
 
 #define DECREMENTER_TIMEOUT 1000000
 
-#define RW_LOCK_READER_EVENT(x)                \
-               ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_tag))))
-
-#define RW_LOCK_WRITER_EVENT(x)                \
-               ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_pad8))))
-
 /*
  * We disable interrupts while holding the RW interlock to prevent an
  * interrupt from exacerbating hold time.
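
The two deleted macros derived distinct wait channels from one lock by
offsetting into separate lck_rw_t fields; they are still used below (see the
RW_LOCK_WRITER_EVENT calls at the assert_wait sites), so they have presumably
moved to a shared header together with the inverse macros that the new
kdp_rwlck_find_owner() at the end of this diff consumes. A plausible
reconstruction of the pair and its inverses, not the authoritative header text:

    /* Hypothetical reconstruction; actual definitions now live in a header. */
    #define RW_LOCK_READER_EVENT(x) \
            ((event_t)(((unsigned char *)(x)) + offsetof(lck_rw_t, lck_rw_tag)))
    #define RW_LOCK_WRITER_EVENT(x) \
            ((event_t)(((unsigned char *)(x)) + offsetof(lck_rw_t, lck_rw_pad8)))
    #define READ_EVENT_TO_RWLOCK(e) \
            ((lck_rw_t *)(((unsigned char *)(e)) - offsetof(lck_rw_t, lck_rw_tag)))
    #define WRITE_EVENT_TO_RWLOCK(e) \
            ((lck_rw_t *)(((unsigned char *)(e)) - offsetof(lck_rw_t, lck_rw_pad8)))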
@@ -860,6 +929,7 @@ void
 lck_rw_lock_exclusive_gen(
        lck_rw_t        *lck)
 {
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
        uint64_t        deadline = 0;
        int             slept = 0;
        int             gotlock = 0;
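
This hunk introduces the pattern the rest of the diff repeats: rather than
truncating the lock address with an (int) cast, kdebug arguments now go through
VM_KERNEL_UNSLIDE_OR_PERM(), which strips the KASLR slide from kernel addresses
(or permutes other kernel pointers) before they reach user-visible trace
buffers; the __kdebug_only qualifier silences unused-variable warnings on
kernels built without kdebug. The idiom, as used throughout below:

    __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_START,
        trace_lck, 0, 0, 0, 0);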
@@ -900,12 +970,12 @@ lck_rw_lock_exclusive_gen(
 
                deadline = lck_rw_deadline_for_spin(lck);
 
-               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
+               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);
                
                while (((gotlock = lck_rw_grab_want(lck)) == 0) && mach_absolute_time() < deadline)
                        lck_rw_lock_pause(istate);
 
-               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_END, (int)lck, 0, 0, gotlock, 0);
+               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, gotlock, 0);
 
                if (gotlock)
                        break;
@@ -920,10 +990,11 @@ lck_rw_lock_exclusive_gen(
 
                        if (lck->lck_rw_want_write) {
 
-                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
+                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);
 
                                lck->lck_w_waiting = TRUE;
 
+                               thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockWrite);
                                res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
                                lck_interlock_unlock(lck, istate);
 
@@ -931,7 +1002,7 @@ lck_rw_lock_exclusive_gen(
                                        res = thread_block(THREAD_CONTINUE_NULL);
                                        slept++;
                                }
-                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_END, (int)lck, res, slept, 0, 0);
+                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_END, trace_lck, res, slept, 0, 0);
                        } else {
                                lck->lck_rw_want_write = TRUE;
                                lck_interlock_unlock(lck, istate);
@@ -979,12 +1050,12 @@ lck_rw_lock_exclusive_gen(
 
                deadline = lck_rw_deadline_for_spin(lck);
 
-               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
+               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);
 
                while ((lockheld = lck_rw_held_read_or_upgrade(lck)) && mach_absolute_time() < deadline)
                        lck_rw_lock_pause(istate);
 
-               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_END, (int)lck, 0, 0, lockheld, 0);
+               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, lockheld, 0);
 
                if ( !lockheld)
                        break;
@@ -998,10 +1069,11 @@ lck_rw_lock_exclusive_gen(
                        istate = lck_interlock_lock(lck);
 
                        if (lck->lck_rw_shared_count != 0 || lck->lck_rw_want_upgrade) {
-                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
+                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0);
 
                                lck->lck_w_waiting = TRUE;
 
+                               thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockWrite);
                                res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
                                lck_interlock_unlock(lck, istate);
 
@@ -1009,7 +1081,7 @@ lck_rw_lock_exclusive_gen(
                                        res = thread_block(THREAD_CONTINUE_NULL);
                                        slept++;
                                }
-                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_END, (int)lck, res, slept, 0, 0);
+                               KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_END, trace_lck, res, slept, 0, 0);
                        } else {
                                lck_interlock_unlock(lck, istate);
                                /*
@@ -1150,7 +1222,7 @@ lck_rw_unlock_shared(
        ret = lck_rw_done(lck);
 
        if (ret != LCK_RW_TYPE_SHARED)
-               panic("lck_rw_unlock(): lock held in mode: %d\n", ret);
+               panic("lck_rw_unlock_shared(): lock %p held in mode: %d\n", lck, ret);
 }
 
 
@@ -1198,12 +1270,13 @@ void
 lck_rw_lock_shared_gen(
        lck_rw_t        *lck)
 {
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
        uint64_t        deadline = 0;
        int             gotlock = 0;
        int             slept = 0;
        wait_result_t   res = 0;
        boolean_t       istate = -1;
-       
+
 #if    CONFIG_DTRACE
        uint64_t wait_interval = 0;
        int readers_at_sleep = 0;
@@ -1235,13 +1308,13 @@ lck_rw_lock_shared_gen(
                deadline = lck_rw_deadline_for_spin(lck);
 
                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_START,
-                            (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);
+                            trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);
 
                while (((gotlock = lck_rw_grab_shared(lck)) == 0) && mach_absolute_time() < deadline)
                        lck_rw_lock_pause(istate);
 
                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_END,
-                            (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, gotlock, 0);
+                            trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, gotlock, 0);
 
                if (gotlock)
                        break;
@@ -1258,10 +1331,11 @@ lck_rw_lock_shared_gen(
                            ((lck->lck_rw_shared_count == 0) || lck->lck_rw_priv_excl)) {
 
                                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_START,
-                                            (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);
+                                            trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);
 
                                lck->lck_r_waiting = TRUE;
 
+                               thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockRead);
                                res = assert_wait(RW_LOCK_READER_EVENT(lck), THREAD_UNINT);
                                lck_interlock_unlock(lck, istate);
 
@@ -1270,7 +1344,7 @@ lck_rw_lock_shared_gen(
                                        slept++;
                                }
                                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_END,
-                                            (int)lck, res, slept, 0, 0);
+                                            trace_lck, res, slept, 0, 0);
                        } else {
                                lck->lck_rw_shared_count++;
                                lck_interlock_unlock(lck, istate);
@@ -1340,7 +1414,7 @@ lck_rw_lock_shared_to_exclusive_failure(
                thread_wakeup(RW_LOCK_WRITER_EVENT(lck));
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_NONE,
-                    (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);
+                    VM_KERNEL_UNSLIDE_OR_PERM(lck), lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);
 
        return (FALSE);
 }
@@ -1358,6 +1432,7 @@ boolean_t
 lck_rw_lock_shared_to_exclusive_success(
        lck_rw_t        *lck)
 {
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
        uint64_t        deadline = 0;
        int             slept = 0;
        int             still_shared = 0;
@@ -1395,13 +1470,13 @@ lck_rw_lock_shared_to_exclusive_success(
                deadline = lck_rw_deadline_for_spin(lck);
 
                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_START,
-                            (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
+                            trace_lck, lck->lck_rw_shared_count, 0, 0, 0);
 
                while ((still_shared = lck->lck_rw_shared_count) && mach_absolute_time() < deadline)
                        lck_rw_lock_pause(istate);
 
                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_END,
-                            (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
+                            trace_lck, lck->lck_rw_shared_count, 0, 0, 0);
 
                if ( !still_shared)
                        break;
@@ -1416,10 +1491,11 @@ lck_rw_lock_shared_to_exclusive_success(
                        
                        if (lck->lck_rw_shared_count != 0) {
                                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_START,
-                                            (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
+                                            trace_lck, lck->lck_rw_shared_count, 0, 0, 0);
 
                                lck->lck_w_waiting = TRUE;
 
+                               thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockUpgrade);
                                res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
                                lck_interlock_unlock(lck, istate);
 
@@ -1428,7 +1504,7 @@ lck_rw_lock_shared_to_exclusive_success(
                                        slept++;
                                }
                                KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_END,
-                                            (int)lck, res, slept, 0, 0);
+                                            trace_lck, res, slept, 0, 0);
                        } else {
                                lck_interlock_unlock(lck, istate);
                                break;
@@ -1467,7 +1543,8 @@ lck_rw_lock_exclusive_to_shared_gen(
        lck_rw_t        *lck,
        int             prior_lock_state)
 {
-       lck_rw_t        *fake_lck;
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
+       lck_rw_t                *fake_lck;
 
        /*
         * prior_lock state is a snapshot of the 1st word of the
@@ -1478,7 +1555,7 @@ lck_rw_lock_exclusive_to_shared_gen(
        fake_lck = (lck_rw_t *)&prior_lock_state;
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
-                            (int)lck, fake_lck->lck_rw_want_write, fake_lck->lck_rw_want_upgrade, 0, 0);
+                            trace_lck, fake_lck->lck_rw_want_write, fake_lck->lck_rw_want_upgrade, 0, 0);
 
        /*
         * don't wake up anyone waiting to take the lock exclusively
@@ -1492,7 +1569,7 @@ lck_rw_lock_exclusive_to_shared_gen(
                thread_wakeup(RW_LOCK_READER_EVENT(lck));
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
-                            (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0);
+                            trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0);
 
 #if CONFIG_DTRACE
        LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0);
@@ -1572,6 +1649,19 @@ lck_rw_clear_promotions_x86(thread_t thread)
 }
 
 
+/*
+ * Routine: kdp_lck_rw_lock_is_acquired_exclusive
+ * NOT SAFE: To be used only by kernel debugger to avoid deadlock.
+ */
+boolean_t
+kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) {
+       if (not_in_kdp) {
+               panic("panic: rw lock exclusive check done outside of kernel debugger");
+       }
+       return ((lck->lck_rw_want_upgrade || lck->lck_rw_want_write) && (lck->lck_rw_shared_count == 0)) ? TRUE : FALSE;
+}
+
+
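One subtlety in the predicate: lck_rw_want_upgrade is set while an upgrading
reader waits for the remaining readers to drain, so the shared_count == 0 test
is what distinguishes a fully acquired exclusive hold from an upgrade still in
flight. The trailing ternary is redundant; an equivalent hedged decode:

    /* Hedged restatement of the exclusivity test above. */
    static boolean_t
    rw_held_exclusive(lck_rw_t *l)
    {
            return ((l->lck_rw_want_upgrade || l->lck_rw_want_write) &&
                (l->lck_rw_shared_count == 0));
    }
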
 #ifdef MUTEX_ZONE
 extern zone_t lck_mtx_zone;
 #endif
@@ -1632,7 +1722,7 @@ lck_mtx_ext_init(
                lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
 
        lck->lck_mtx.lck_mtx_is_ext = 1;
-       lck->lck_mtx.lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
+       lck->lck_mtx.lck_mtx_pad32 = 0xFFFFFFFF;
 }
 
 /*
@@ -1662,7 +1752,7 @@ lck_mtx_init(
                lck->lck_mtx_owner = 0;
                lck->lck_mtx_state = 0;
        }
-       lck->lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
+       lck->lck_mtx_pad32 = 0xFFFFFFFF;
        lck_grp_reference(grp);
        lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
 }
@@ -1692,7 +1782,7 @@ lck_mtx_init_ext(
                lck->lck_mtx_owner = 0;
                lck->lck_mtx_state = 0;
        }
-       lck->lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
+       lck->lck_mtx_pad32 = 0xFFFFFFFF;
 
        lck_grp_reference(grp);
        lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
@@ -1747,7 +1837,8 @@ lck_mtx_unlock_wakeup_x86 (
        lck_mtx_t       *mutex,
        int             prior_lock_state)
 {
-       lck_mtx_t       fake_lck;
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
+       lck_mtx_t               fake_lck;
 
        /*
         * prior_lock state is a snapshot of the 2nd word of the
@@ -1758,13 +1849,13 @@ lck_mtx_unlock_wakeup_x86 (
        fake_lck.lck_mtx_state = prior_lock_state;
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAKEUP_CODE) | DBG_FUNC_START,
-                    mutex, fake_lck.lck_mtx_promoted, fake_lck.lck_mtx_waiters, fake_lck.lck_mtx_pri, 0);
+                    trace_lck, fake_lck.lck_mtx_promoted, fake_lck.lck_mtx_waiters, fake_lck.lck_mtx_pri, 0);
 
        if (__probable(fake_lck.lck_mtx_waiters)) {
                if (fake_lck.lck_mtx_waiters > 1)
-                       thread_wakeup_one_with_pri((event_t)(((unsigned int*)mutex)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)), fake_lck.lck_mtx_pri);
+                       thread_wakeup_one_with_pri(LCK_MTX_EVENT(mutex), fake_lck.lck_mtx_pri);
                else
-                       thread_wakeup_one((event_t)(((unsigned int*)mutex)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));
+                       thread_wakeup_one(LCK_MTX_EVENT(mutex));
        }
 
        if (__improbable(fake_lck.lck_mtx_promoted)) {
@@ -1787,16 +1878,16 @@ lck_mtx_unlock_wakeup_x86 (
                                        /* Thread still has a RW lock promotion */
                                } else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
                                        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
-                                                             thread->sched_pri, DEPRESSPRI, 0, mutex, 0);
+                                                             thread->sched_pri, DEPRESSPRI, 0, trace_lck, 0);
 
                                        set_sched_pri(thread, DEPRESSPRI);
                                }
                                else {
-                                       if (thread->priority < thread->sched_pri) {
+                                       if (thread->base_pri < thread->sched_pri) {
                                                KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
-                                                                     thread->sched_pri, thread->priority, 0, mutex, 0);
+                                                                     thread->sched_pri, thread->base_pri, 0, trace_lck, 0);
 
-                                               SCHED(compute_priority)(thread, FALSE);
+                                               thread_recompute_sched_pri(thread, FALSE);
                                        }
                                }
                        }
@@ -1805,7 +1896,7 @@ lck_mtx_unlock_wakeup_x86 (
                }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAKEUP_CODE) | DBG_FUNC_END,
-                    mutex, 0, mutex->lck_mtx_waiters, 0, 0);
+                    trace_lck, 0, mutex->lck_mtx_waiters, 0, 0);
 }
 
 
@@ -1823,12 +1914,13 @@ void
 lck_mtx_lock_acquire_x86(
        lck_mtx_t       *mutex)
 {
-       thread_t        thread;
-       integer_t       priority;
-       spl_t           s;
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
+       thread_t                thread;
+       integer_t               priority;
+       spl_t                   s;
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_ACQUIRE_CODE) | DBG_FUNC_START,
-                    mutex, thread->was_promoted_on_wakeup, mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);
+                    trace_lck, thread->was_promoted_on_wakeup, mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);
 
        if (mutex->lck_mtx_waiters)
                priority = mutex->lck_mtx_pri;
@@ -1840,7 +1932,7 @@ lck_mtx_lock_acquire_x86(
        if (thread->sched_pri < priority || thread->was_promoted_on_wakeup) {
 
                KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
-                                     thread->sched_pri, priority, thread->was_promoted_on_wakeup, mutex, 0);
+                                     thread->sched_pri, priority, thread->was_promoted_on_wakeup, trace_lck, 0);
 
                s = splsched();
                thread_lock(thread);
@@ -1862,10 +1954,31 @@ lck_mtx_lock_acquire_x86(
                splx(s);
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_ACQUIRE_CODE) | DBG_FUNC_END,
-                    mutex, 0, mutex->lck_mtx_waiters, 0, 0);
+                    trace_lck, 0, mutex->lck_mtx_waiters, 0, 0);
 }
 
 
+static int
+lck_mtx_interlock_try_lock(lck_mtx_t *mutex, boolean_t *istate)
+{
+       int             retval;
+
+       *istate = ml_set_interrupts_enabled(FALSE);
+       retval = lck_mtx_ilk_try_lock(mutex);
+
+       if (retval == 0)
+               ml_set_interrupts_enabled(*istate);
+
+       return retval;
+}
+
+static void
+lck_mtx_interlock_unlock(lck_mtx_t *mutex, boolean_t istate)
+{
+       lck_mtx_ilk_unlock(mutex);
+       ml_set_interrupts_enabled(istate);
+}
+
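
These helpers exist because the mutex interlock must be held with interrupts
masked: on a failed try the saved interrupt state is restored immediately, and
on success it is threaded through to the matching unlock. The spin-wait loop
below uses them to inspect the owner field safely; a minimal sketch of the
pairing, helper name hypothetical:

    /* Hedged sketch: peek at the holder under the interlock. */
    static boolean_t
    mutex_holder_is_idle(lck_mtx_t *mutex)
    {
            boolean_t       istate;
            boolean_t       idle = FALSE;
            thread_t        holder;

            if (!lck_mtx_interlock_try_lock(mutex, &istate))
                    return FALSE;   /* interlock busy; interrupts already restored */

            if ((holder = (thread_t)mutex->lck_mtx_owner) != NULL)
                    idle = (holder->state & TH_IDLE) ? TRUE : FALSE;

            lck_mtx_interlock_unlock(mutex, istate);        /* restores interrupt state */
            return idle;
    }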
 
 /*
  * Routine:    lck_mtx_lock_spinwait_x86
@@ -1883,16 +1996,20 @@ int
 lck_mtx_lock_spinwait_x86(
        lck_mtx_t       *mutex)
 {
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
        thread_t        holder;
-       uint64_t        deadline;
+       uint64_t        overall_deadline;
+       uint64_t        check_owner_deadline;
+       uint64_t        cur_time;
        int             retval = 1;
        int             loopcount = 0;
 
-
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_START,
-                    mutex, mutex->lck_mtx_owner, mutex->lck_mtx_waiters, 0, 0);
+                    trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, 0, 0);
 
-       deadline = mach_absolute_time() + MutexSpin;
+       cur_time = mach_absolute_time();
+       overall_deadline = cur_time + MutexSpin;
+       check_owner_deadline = cur_time;
 
        /*
         * Spin while:
@@ -1907,25 +2024,42 @@ lck_mtx_lock_spinwait_x86(
                        retval = 0;
                        break;
                }
-               if ((holder = (thread_t) mutex->lck_mtx_owner) != NULL) {
+               cur_time = mach_absolute_time();
 
-                       if ( !(holder->machine.specFlags & OnProc) ||
-                            (holder->state & TH_IDLE)) {
-                               if (loopcount == 0)
-                                       retval = 2;
-                               break;
+               if (cur_time >= overall_deadline)
+                       break;
+
+               if (cur_time >= check_owner_deadline && mutex->lck_mtx_owner) {
+                       boolean_t       istate;
+
+                       if (lck_mtx_interlock_try_lock(mutex, &istate)) {
+
+                               if ((holder = (thread_t) mutex->lck_mtx_owner) != NULL) {
+
+                                       if ( !(holder->machine.specFlags & OnProc) ||
+                                            (holder->state & TH_IDLE)) {
+
+                                               lck_mtx_interlock_unlock(mutex, istate);
+
+                                               if (loopcount == 0)
+                                                       retval = 2;
+                                               break;
+                                       }
+                               }
+                               lck_mtx_interlock_unlock(mutex, istate);
+
+                               check_owner_deadline = cur_time + (MutexSpin / 4);
                        }
                }
                cpu_pause();
 
                loopcount++;
 
-       } while (mach_absolute_time() < deadline);
-
+       } while (TRUE);
 
 #if    CONFIG_DTRACE
        /*
-        * We've already kept a count via deadline of how long we spun.
+        * We've already kept a count via overall_deadline of how long we spun.
         * If dtrace is active, then we compute backwards to decide how
         * long we spun.
         *
@@ -1936,16 +2070,16 @@ lck_mtx_lock_spinwait_x86(
         */
        if (__probable(mutex->lck_mtx_is_ext == 0)) {
                LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, mutex,
-                   mach_absolute_time() - (deadline - MutexSpin));
+                       mach_absolute_time() - (overall_deadline - MutexSpin));
        } else {
                LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, mutex,
-                   mach_absolute_time() - (deadline - MutexSpin));
+                       mach_absolute_time() - (overall_deadline - MutexSpin));
        }
        /* The lockstat acquire event is recorded by the assembly code beneath us. */
 #endif
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_END,
-                    mutex, mutex->lck_mtx_owner, mutex->lck_mtx_waiters, retval, 0);
+                    trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, retval, 0);
 
        return retval;
 }
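
The rework replaces the single spin deadline with two: overall_deadline still
caps total spin time at MutexSpin, while check_owner_deadline rate-limits the
owner inspection, which now requires briefly taking the interlock, to one probe
per MutexSpin / 4 rather than one per iteration. If a probe finds the holder
off-CPU or idle, spinning cannot help and the routine returns to block (retval
is 2 when that is detected before the first full loop). A hedged restatement of
the bookkeeping:

    uint64_t cur_time             = mach_absolute_time();
    uint64_t overall_deadline     = cur_time + MutexSpin;   /* hard cap on spinning */
    uint64_t check_owner_deadline = cur_time;               /* first probe allowed at once */

    /* after each probe that takes the interlock: */
    check_owner_deadline = cur_time + (MutexSpin / 4);      /* at most ~4 probes per window */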
@@ -1965,6 +2099,7 @@ void
 lck_mtx_lock_wait_x86 (
        lck_mtx_t       *mutex)
 {
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
        thread_t        self = current_thread();
        thread_t        holder;
        integer_t       priority;
@@ -1977,12 +2112,12 @@ lck_mtx_lock_wait_x86 (
        }
 #endif
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START,
-                    mutex, mutex->lck_mtx_owner, mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);
+                    trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);
 
        priority = self->sched_pri;
 
-       if (priority < self->priority)
-               priority = self->priority;
+       if (priority < self->base_pri)
+               priority = self->base_pri;
        if (priority < BASEPRI_DEFAULT)
                priority = BASEPRI_DEFAULT;
 
@@ -2004,7 +2139,7 @@ lck_mtx_lock_wait_x86 (
                if (holder->sched_pri < mutex->lck_mtx_pri) {
                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
-                               holder->sched_pri, priority, thread_tid(holder), mutex, 0);
+                               holder->sched_pri, priority, thread_tid(holder), trace_lck, 0);
                        /* Assert that we're not altering the priority of a
                         * thread above the MAXPRI_PROMOTE band
                         */
@@ -2021,14 +2156,15 @@ lck_mtx_lock_wait_x86 (
                thread_unlock(holder);
                splx(s);
        }
-       assert_wait((event_t)(((unsigned int*)mutex)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
+       thread_set_pending_block_hint(self, kThreadWaitKernelMutex);
+       assert_wait(LCK_MTX_EVENT(mutex), THREAD_UNINT);
 
        lck_mtx_ilk_unlock(mutex);
 
        thread_block(THREAD_CONTINUE_NULL);
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END,
-                    mutex, mutex->lck_mtx_owner, mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);
+                    trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0);
 
 #if    CONFIG_DTRACE
        /*
@@ -2046,3 +2182,51 @@ lck_mtx_lock_wait_x86 (
        }
 #endif
 }
+
+/*
+ *      Routine: kdp_lck_mtx_lock_spin_is_acquired
+ *      NOT SAFE: To be used only by kernel debugger to avoid deadlock.
+ *      Returns: TRUE if lock is acquired.
+ */
+boolean_t
+kdp_lck_mtx_lock_spin_is_acquired(lck_mtx_t    *lck)
+{
+       if (not_in_kdp) {
+               panic("panic: kdp_lck_mtx_lock_spin_is_acquired called outside of kernel debugger");
+       }
+
+       if (lck->lck_mtx_ilocked || lck->lck_mtx_mlocked) {
+               return TRUE;
+       }
+
+       return FALSE;
+}
+
+void
+kdp_lck_mtx_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo)
+{
+       lck_mtx_t * mutex = LCK_EVENT_TO_MUTEX(event);
+       waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(mutex);
+       thread_t holder   = (thread_t)mutex->lck_mtx_owner;
+       waitinfo->owner   = thread_tid(holder);
+}
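
LCK_MTX_EVENT() and LCK_EVENT_TO_MUTEX() replace the open-coded pointer
arithmetic this diff deletes from the wakeup and wait paths, and they must be
inverses of each other for this lookup to work. A plausible reconstruction from
the deleted expressions, not the authoritative header text:

    /* Hypothetical reconstruction mirroring the deleted open-coded form. */
    #define LCK_MTX_EVENT(m) \
            ((event_t)(((unsigned int *)(m)) + ((sizeof(lck_mtx_t) - 1) / sizeof(unsigned int))))
    #define LCK_EVENT_TO_MUTEX(e) \
            ((lck_mtx_t *)(((unsigned int *)(e)) - ((sizeof(lck_mtx_t) - 1) / sizeof(unsigned int))))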
+
+void
+kdp_rwlck_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo)
+{
+       lck_rw_t *rwlck = NULL;
+       switch(waitinfo->wait_type) {
+               case kThreadWaitKernelRWLockRead:
+                       rwlck = READ_EVENT_TO_RWLOCK(event);
+                       break;
+               case kThreadWaitKernelRWLockWrite:
+               case kThreadWaitKernelRWLockUpgrade:
+                       rwlck = WRITE_EVENT_TO_RWLOCK(event);
+                       break;
+               default:
+                       panic("%s was called with an invalid blocking type", __FUNCTION__);
+                       break;
+       }
+       waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(rwlck);
+       waitinfo->owner = 0;
+}
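
Both lookups are reached from stackshot rather than from normal lock paths:
every blocking site above now calls thread_set_pending_block_hint() before its
assert_wait(), and that saved kThreadWait* hint plus the wait event are enough
for the debugger to recover the lock address and, for mutexes, the owner. A
hedged sketch of the producer side of that contract, mirroring
lck_mtx_lock_wait_x86 above:

    /* Hedged sketch: tag the block so kdp_lck_mtx_find_owner() can decode it. */
    static void
    mutex_wait_example(lck_mtx_t *mutex)
    {
            thread_set_pending_block_hint(current_thread(), kThreadWaitKernelMutex);
            assert_wait(LCK_MTX_EVENT(mutex), THREAD_UNINT);

            lck_mtx_ilk_unlock(mutex);              /* drop the interlock before blocking */
            thread_block(THREAD_CONTINUE_NULL);
    }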