diff --git a/osfmk/kern/locks.c b/osfmk/kern/locks.c
index 1431b00d293ba0dcbd4db888b0447c80607039b2..e31e970c67c414639b9eb75b454fe2b262df7cde 100644
--- a/osfmk/kern/locks.c
+++ b/osfmk/kern/locks.c
@@ -1,31 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * This file contains Original Code and/or Modifications of Original Code 
- * as defined in and that are subject to the Apple Public Source License 
- * Version 2.0 (the 'License'). You may not use this file except in 
- * compliance with the License.  The rights granted to you under the 
- * License may not be used to create, or enable the creation or 
- * redistribution of, unlawful or unlicensed copies of an Apple operating 
- * system, or to circumvent, violate, or enable the circumvention or 
- * violation of, any terms of an Apple operating system software license 
- * agreement.
- *
- * Please obtain a copy of the License at 
- * http://www.opensource.apple.com/apsl/ and read it before using this 
- * file.
- *
- * The Original Code and all software distributed under the License are 
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
- * Please see the License for the specific language governing rights and 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
 
 #include <sys/kdebug.h>
 
+#if    CONFIG_DTRACE
+/*
+ * We need only enough declarations from the BSD side to be able to
+ * test whether our probe is active, and to call __dtrace_probe().
+ * Defining NEED_DTRACE_DEFS pulls in a local copy of those definitions.
+ */
+#define NEED_DTRACE_DEFS
+#include <../bsd/sys/lockstat.h>
+#endif
+
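
A minimal illustrative sketch of the pattern this comment describes (the helper name is hypothetical; lockstat_probemap[], LOCKSTAT_RECORD() and LS_LCK_MTX_LOCK_BLOCK come from bsd/sys/lockstat.h and appear in the hunks below):

static void
lockstat_block_example(lck_mtx_t *lck, uint64_t sleep_start)
{
	/* Fire the probe only when dtrace(1) has enabled it. */
	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK]) {
		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
		    mach_absolute_time() - sleep_start);
	}
}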
 #define        LCK_MTX_SLEEP_CODE              0
 #define        LCK_MTX_SLEEP_DEADLINE_CODE     1
 #define        LCK_MTX_LCK_WAIT_CODE           2
 static queue_head_t    lck_grp_queue;
 static unsigned int    lck_grp_cnt;
 
-decl_mutex_data(static,lck_grp_lock)
+decl_lck_mtx_data(static,lck_grp_lock)
+static lck_mtx_ext_t lck_grp_lock_ext;
 
 lck_grp_attr_t LockDefaultGroupAttr;
-lck_grp_t      LockCompatGroup;
-lck_attr_t     LockDefaultLckAttr;
+lck_grp_t              LockCompatGroup;
+lck_attr_t             LockDefaultLckAttr;
 
 /*
  * Routine:    lck_mod_init
@@ -99,11 +108,30 @@ lck_mod_init(
        void)
 {
        queue_init(&lck_grp_queue);
-       mutex_init(&lck_grp_lock, 0);
-       lck_grp_cnt = 0;
-       lck_grp_attr_setdefault( &LockDefaultGroupAttr);
-       lck_grp_init( &LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
+       
+       /*
+        * Need to bootstrap LockCompatGroup here instead of calling lck_grp_init(),
+        * to avoid grabbing lck_grp_lock before it is initialized.
+        */
+       
+       bzero(&LockCompatGroup, sizeof(lck_grp_t));
+       (void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);
+       
+       if (LcksOpts & enaLkStat)
+               LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT;
+       else
+               LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;
+       
+       LockCompatGroup.lck_grp_refcnt = 1;
+       
+       enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
+       lck_grp_cnt = 1;
+       
+       lck_grp_attr_setdefault(&LockDefaultGroupAttr);
        lck_attr_setdefault(&LockDefaultLckAttr);
+       
+       lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);
+       
 }
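
For reference, the ordering hazard the bootstrap above avoids (an illustrative sketch; see the lck_grp_init() hunk below):

/*
 * lck_mod_init()
 *     lck_grp_init(&LockCompatGroup, ...)     // would call:
 *         lck_mtx_lock(&lck_grp_lock)         // not yet initialized!
 *     ...
 *     lck_mtx_init_ext(&lck_grp_lock, ...)    // initialized only here,
 *                                             // so the group is built by hand
 */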
 
 /*
@@ -146,7 +174,7 @@ void
 lck_grp_attr_setstat(
        lck_grp_attr_t  *attr)
 {
-       (void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
+       (void)hw_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
 }
 
 
@@ -203,10 +231,10 @@ lck_grp_init(
 
        grp->lck_grp_refcnt = 1;
 
-       mutex_lock(&lck_grp_lock);
+       lck_mtx_lock(&lck_grp_lock);
        enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
        lck_grp_cnt++;
-       mutex_unlock(&lck_grp_lock);
+       lck_mtx_unlock(&lck_grp_lock);
 
 }
 
@@ -219,10 +247,10 @@ void
 lck_grp_free(
        lck_grp_t       *grp)
 {
-       mutex_lock(&lck_grp_lock);
+       lck_mtx_lock(&lck_grp_lock);
        lck_grp_cnt--;
        (void)remque((queue_entry_t)grp);
-       mutex_unlock(&lck_grp_lock);
+       lck_mtx_unlock(&lck_grp_lock);
        lck_grp_deallocate(grp);
 }
 
@@ -235,7 +263,7 @@ void
 lck_grp_reference(
        lck_grp_t       *grp)
 {
-       (void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
+       (void)hw_atomic_add(&grp->lck_grp_refcnt, 1);
 }
 
 
@@ -247,7 +275,7 @@ void
 lck_grp_deallocate(
        lck_grp_t       *grp)
 {
-       if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
+       if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0)
                kfree(grp, sizeof(lck_grp_t));
 }
 
@@ -276,7 +304,7 @@ lck_grp_lckcnt_incr(
                return panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
        }
 
-       (void)hw_atomic_add((uint32_t *)lckcnt, 1);
+       (void)hw_atomic_add(lckcnt, 1);
 }
 
 /*
@@ -304,7 +332,7 @@ lck_grp_lckcnt_decr(
                return panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
        }
 
-       (void)hw_atomic_sub((uint32_t *)lckcnt, 1);
+       (void)hw_atomic_sub(lckcnt, 1);
 }
 
 /*
@@ -333,14 +361,13 @@ lck_attr_setdefault(
        lck_attr_t      *attr)
 {
 #if     !DEBUG
-       if (LcksOpts & enaLkDeb)
-               attr->lck_attr_val =  LCK_ATTR_DEBUG;
-       else
-               attr->lck_attr_val =  LCK_ATTR_NONE;
+       if (LcksOpts & enaLkDeb)
+               attr->lck_attr_val =  LCK_ATTR_DEBUG;
+       else
+               attr->lck_attr_val =  LCK_ATTR_NONE;
 #else
-       attr->lck_attr_val =  LCK_ATTR_DEBUG;
-#endif
-
+       attr->lck_attr_val =  LCK_ATTR_DEBUG;
+#endif /* !DEBUG */
 }
 
 
@@ -351,7 +378,28 @@ void
 lck_attr_setdebug(
        lck_attr_t      *attr)
 {
-       (void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
+       (void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG);
+}
+
+/*
+ * Routine:    lck_attr_cleardebug
+ */
+void
+lck_attr_cleardebug(
+       lck_attr_t      *attr)
+{
+       (void)hw_atomic_and(&attr->lck_attr_val, ~LCK_ATTR_DEBUG);
+}
+
+
+/*
+ * Routine:    lck_attr_rw_shared_priority
+ */
+void
+lck_attr_rw_shared_priority(
+       lck_attr_t      *attr)
+{
+       (void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
 }
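
A possible usage sketch for the new attribute setter (the caller is hypothetical; lck_attr_alloc_init() and lck_rw_alloc_init() are the standard allocation paths):

lck_rw_t *
example_reader_pref_rwlock(lck_grp_t *grp)
{
	lck_attr_t	*attr = lck_attr_alloc_init();

	/* Bias the rw lock toward shared (reader) acquisitions. */
	lck_attr_rw_shared_priority(attr);
	return (lck_rw_alloc_init(grp, attr));
}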
 
 
@@ -449,8 +497,12 @@ lck_mtx_sleep(
        if (res == THREAD_WAITING) {
                lck_mtx_unlock(lck);
                res = thread_block(THREAD_CONTINUE_NULL);
-               if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
-                       lck_mtx_lock(lck);
+               if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
+                       if ((lck_sleep_action & LCK_SLEEP_SPIN))
+                               lck_mtx_lock_spin(lck);
+                       else
+                               lck_mtx_lock(lck);
+               }
        }
        else
        if (lck_sleep_action & LCK_SLEEP_UNLOCK)
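
A usage sketch for the new LCK_SLEEP_SPIN handling above (hypothetical caller and event; lck_mtx_convert_spin() is assumed as the follow-up that turns a spin-held mutex back into a fully held one):

wait_result_t
example_wait_spin(lck_mtx_t *m, event_t ev)
{
	wait_result_t	res;

	lck_mtx_lock(m);
	/* ... recheck the wait condition under the lock ... */
	res = lck_mtx_sleep(m, LCK_SLEEP_SPIN, ev, THREAD_UNINT);
	/* Woke up holding m in spin mode, per the path above. */
	lck_mtx_convert_spin(m);
	lck_mtx_unlock(m);
	return (res);
}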
@@ -514,6 +566,13 @@ lck_mtx_lock_wait (
        lck_mtx_t               *mutex;
        integer_t               priority;
        spl_t                   s = splsched();
+#if    CONFIG_DTRACE
+       uint64_t                sleep_start = 0;
+
+       if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
+               sleep_start = mach_absolute_time();
+       }
+#endif
 
        if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
                mutex = lck;
@@ -525,25 +584,20 @@ lck_mtx_lock_wait (
        priority = self->sched_pri;
        if (priority < self->priority)
                priority = self->priority;
-       if (priority > MINPRI_KERNEL)
-               priority = MINPRI_KERNEL;
-       else
        if (priority < BASEPRI_DEFAULT)
                priority = BASEPRI_DEFAULT;
 
        thread_lock(holder);
        if (mutex->lck_mtx_pri == 0)
                holder->promotions++;
-       if (holder->priority < MINPRI_KERNEL) {
-               holder->sched_mode |= TH_MODE_PROMOTED;
-               if (    mutex->lck_mtx_pri < priority   &&
+       holder->sched_mode |= TH_MODE_PROMOTED;
+       if (            mutex->lck_mtx_pri < priority   &&
                                holder->sched_pri < priority            ) {
-                       KERNEL_DEBUG_CONSTANT(
-                               MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
-                                       holder->sched_pri, priority, (int)holder, (int)lck, 0);
+               KERNEL_DEBUG_CONSTANT(
+                       MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
+                                       holder->sched_pri, priority, holder, lck, 0);
 
-                       set_sched_pri(holder, priority);
-               }
+               set_sched_pri(holder, priority);
        }
        thread_unlock(holder);
        splx(s);
@@ -566,6 +620,21 @@ lck_mtx_lock_wait (
        thread_block(THREAD_CONTINUE_NULL);
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+#if    CONFIG_DTRACE
+       /*
+        * Record the DTrace lockstat probe for blocking; the block time is
+        * measured from when we entered this routine.
+        */
+       if (sleep_start) {
+               if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
+                       LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
+                           mach_absolute_time() - sleep_start);
+               } else {
+                       LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
+                           mach_absolute_time() - sleep_start);
+               }
+       }
+#endif
 }
 
 /*
@@ -603,15 +672,13 @@ lck_mtx_lock_acquire(
 
                thread_lock(thread);
                thread->promotions++;
-               if (thread->priority < MINPRI_KERNEL) {
-                       thread->sched_mode |= TH_MODE_PROMOTED;
-                       if (thread->sched_pri < priority) {
-                               KERNEL_DEBUG_CONSTANT(
-                                       MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
-                                               thread->sched_pri, priority, 0, (int)lck, 0);
+               thread->sched_mode |= TH_MODE_PROMOTED;
+               if (thread->sched_pri < priority) {
+                       KERNEL_DEBUG_CONSTANT(
+                               MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
+                                               thread->sched_pri, priority, 0, lck, 0);
 
-                               set_sched_pri(thread, priority);
-                       }
+                       set_sched_pri(thread, priority);
                }
                thread_unlock(thread);
                splx(s);
@@ -646,7 +713,7 @@ lck_mtx_unlock_wakeup (
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);
 
        if (thread != holder)
-               panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);
+               panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);
 
        if (thread->promotions > 0) {
                spl_t           s = splsched();
@@ -658,7 +725,7 @@ lck_mtx_unlock_wakeup (
                        if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
                                KERNEL_DEBUG_CONSTANT(
                                        MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
-                                                 thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);
+                                                 thread->sched_pri, DEPRESSPRI, 0, lck, 0);
 
                                set_sched_pri(thread, DEPRESSPRI);
                        }
@@ -668,7 +735,7 @@ lck_mtx_unlock_wakeup (
                                                MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
                                                                                                                        DBG_FUNC_NONE,
                                                        thread->sched_pri, thread->priority,
-                                                                       0, (int)lck, 0);
+                                                                       0, lck, 0);
                                }
 
                                compute_priority(thread, FALSE);
@@ -683,24 +750,90 @@ lck_mtx_unlock_wakeup (
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
 }
 
+void
+lck_mtx_unlockspin_wakeup (
+       lck_mtx_t                       *lck)
+{
+       assert(lck->lck_mtx_waiters > 0);
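+       /*
+        * The wakeup event is the address of the last 32-bit word inside
+        * the lck_mtx_t, matching the event that blocked waiters slept on.
+        */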
+       thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));
+
+       KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, lck, 0, 0, 1, 0);
+#if CONFIG_DTRACE
+       /*
+        * When there are waiters, we skip the hot-patch spot in the
+        * fastpath, so we record it here.
+        */
+       LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck, 0);
+#endif
+}
+
+
 /*
  * Routine:    mutex_pause
  *
  * Called by former callers of simple_lock_pause().
  */
+#define MAX_COLLISION_COUNTS   32
+#define MAX_COLLISION  8
+
+unsigned int max_collision_count[MAX_COLLISION_COUNTS];
+
+uint32_t collision_backoffs[MAX_COLLISION] = {
+        10, 50, 100, 200, 400, 600, 800, 1000
+};
+
 
 void
-mutex_pause(void)
+mutex_pause(uint32_t collisions)
 {
        wait_result_t wait_result;
+       uint32_t        back_off;
 
-       wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
+       if (collisions >= MAX_COLLISION_COUNTS)
+               collisions = MAX_COLLISION_COUNTS - 1;
+       max_collision_count[collisions]++;
+
+       if (collisions >= MAX_COLLISION)
+               collisions = MAX_COLLISION - 1;
+       back_off = collision_backoffs[collisions];
+
+       wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC);
        assert(wait_result == THREAD_WAITING);
 
        wait_result = thread_block(THREAD_CONTINUE_NULL);
        assert(wait_result == THREAD_TIMED_OUT);
 }
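
A sketch of the intended caller pattern for the new collision argument (the retry loop is hypothetical): each failed trylock bumps the collision count, and mutex_pause() clamps it and waits collision_backoffs[n] microseconds (the NSEC_PER_USEC scale above):

void
example_lock_backoff(lck_mtx_t *m)
{
	uint32_t	collisions = 0;

	while (!lck_mtx_try_lock(m))
		mutex_pause(collisions++);
	/* mutex held here */
}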
 
+
+unsigned int mutex_yield_wait = 0;
+unsigned int mutex_yield_no_wait = 0;
+
+void
+lck_mtx_yield(
+           lck_mtx_t   *lck)
+{
+       int     waiters;
+       
+#if DEBUG
+       lck_mtx_assert(lck, LCK_MTX_ASSERT_OWNED);
+#endif /* DEBUG */
+       
+       if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
+               waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters;
+       else
+               waiters = lck->lck_mtx_waiters;
+
+       if (!waiters) {
+               mutex_yield_no_wait++;
+       } else {
+               mutex_yield_wait++;
+               lck_mtx_unlock(lck);
+               mutex_pause(0);
+               lck_mtx_lock(lck);
+       }
+}
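
A usage sketch (hypothetical worker): a long scan that holds the mutex but periodically lets queued waiters in via lck_mtx_yield():

void
example_long_scan(lck_mtx_t *m, int nitems)
{
	int	i;

	lck_mtx_lock(m);
	for (i = 0; i < nitems; i++) {
		/* ... process item i under the lock ... */
		lck_mtx_yield(m);	/* drops/retakes only if waiters exist */
	}
	lck_mtx_unlock(m);
}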
+
+
 /*
  * Routine:    lck_rw_sleep
  */
@@ -794,13 +927,13 @@ host_lockgroup_info(
        if (host == HOST_NULL)
                return KERN_INVALID_HOST;
 
-       mutex_lock(&lck_grp_lock);
+       lck_mtx_lock(&lck_grp_lock);
 
        lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
        kr = kmem_alloc_pageable(ipc_kernel_map,
                                                 &lockgroup_info_addr, lockgroup_info_size);
        if (kr != KERN_SUCCESS) {
-               mutex_unlock(&lck_grp_lock);
+               lck_mtx_unlock(&lck_grp_lock);
                return(kr);
        }
 
@@ -844,7 +977,7 @@ host_lockgroup_info(
        }
 
        *lockgroup_infoCntp = lck_grp_cnt;
-       mutex_unlock(&lck_grp_lock);
+       lck_mtx_unlock(&lck_grp_lock);
 
        used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;
 
@@ -875,23 +1008,19 @@ extern void              lock_write_to_read_EXT(lck_rw_t *lock);
 extern wait_result_t   thread_sleep_lock_write_EXT( 
                                event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);
 
-extern lck_mtx_t       *mutex_alloc_EXT(unsigned short tag);
-extern void            mutex_free_EXT(lck_mtx_t *mutex);
-extern void            mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
-extern void            mutex_lock_EXT(lck_mtx_t *mutex);
-extern boolean_t       mutex_try_EXT(lck_mtx_t *mutex);
-extern void            mutex_unlock_EXT(lck_mtx_t *mutex);
-extern wait_result_t   thread_sleep_mutex_EXT(
-                               event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
-extern wait_result_t   thread_sleep_mutex_deadline_EXT(
-                               event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);
-
 extern void            usimple_lock_EXT(lck_spin_t *lock);
 extern void            usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
 extern unsigned int    usimple_lock_try_EXT(lck_spin_t *lock);
 extern void            usimple_unlock_EXT(lck_spin_t *lock);
 extern wait_result_t   thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);
 
+
+lck_mtx_t*             mutex_alloc_EXT(__unused unsigned short tag);
+void                   mutex_free_EXT(lck_mtx_t *mutex);
+void                   mutex_init_EXT(lck_mtx_t *mutex, __unused unsigned short tag);
+wait_result_t          thread_sleep_mutex_EXT(event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
+wait_result_t          thread_sleep_mutex_deadline_EXT(event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);
+
 lck_rw_t * 
 lock_alloc_EXT(
        __unused boolean_t       can_sleep,
@@ -962,102 +1091,80 @@ thread_sleep_lock_write_EXT(
        return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
 }
 
-lck_mtx_t *
-mutex_alloc_EXT(
-       __unused unsigned short         tag)
-{
-       return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
-}
-
 void
-mutex_free_EXT(
-       lck_mtx_t               *mutex)
+usimple_lock_EXT(
+       lck_spin_t              *lock)
 {
-       lck_mtx_free(mutex, &LockCompatGroup);  
+       lck_spin_lock(lock);
 }
 
 void
-mutex_init_EXT(
-       lck_mtx_t               *mutex,
+usimple_lock_init_EXT(
+       lck_spin_t              *lock,
        __unused unsigned short tag)
 {
-       lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);   
-}
-
-void
-mutex_lock_EXT(
-       lck_mtx_t               *mutex)
-{
-       lck_mtx_lock(mutex);
+       lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
 }
 
-boolean_t
-mutex_try_EXT(
-       lck_mtx_t               *mutex)
+unsigned int
+usimple_lock_try_EXT(
+       lck_spin_t              *lock)
 {
-       return(lck_mtx_try_lock(mutex));
+       return(lck_spin_try_lock(lock));
 }
 
 void
-mutex_unlock_EXT(
-       lck_mtx_t               *mutex)
+usimple_unlock_EXT(
+       lck_spin_t              *lock)
 {
-       lck_mtx_unlock(mutex);
+       lck_spin_unlock(lock);
 }
 
 wait_result_t
-thread_sleep_mutex_EXT(
+thread_sleep_usimple_lock_EXT(
        event_t                 event,
-       lck_mtx_t               *mutex,
-       wait_interrupt_t        interruptible)
+       lck_spin_t              *lock,
+       wait_interrupt_t        interruptible)
 {
-       return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
+       return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
 }
-
-wait_result_t
-thread_sleep_mutex_deadline_EXT(
-       event_t                 event,
-       lck_mtx_t               *mutex,
-       uint64_t                deadline,
-       wait_interrupt_t        interruptible)
+lck_mtx_t *
+mutex_alloc_EXT(
+       __unused unsigned short         tag)
 {
-       return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
+       return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
 }
 
 void
-usimple_lock_EXT(
-       lck_spin_t              *lock)
+mutex_free_EXT(
+       lck_mtx_t               *mutex)
 {
-       lck_spin_lock(lock);
+       lck_mtx_free(mutex, &LockCompatGroup);
 }
 
 void
-usimple_lock_init_EXT(
-       lck_spin_t              *lock,
-       __unused unsigned short tag)
-{
-       lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
-}
-
-unsigned int
-usimple_lock_try_EXT(
-       lck_spin_t              *lock)
+mutex_init_EXT(
+       lck_mtx_t               *mutex,
+       __unused unsigned short tag)
 {
-       lck_spin_try_lock(lock);
+       lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
 }
 
-void
-usimple_unlock_EXT(
-       lck_spin_t              *lock)
+wait_result_t
+thread_sleep_mutex_EXT(
+       event_t                 event,
+       lck_mtx_t               *mutex,
+       wait_interrupt_t        interruptible)
 {
-       lck_spin_unlock(lock);
+       return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
 }
 
 wait_result_t
-thread_sleep_usimple_lock_EXT(
-       event_t                 event,
-       lck_spin_t              *lock,
-       wait_interrupt_t        interruptible)
+thread_sleep_mutex_deadline_EXT(
+       event_t                 event,
+       lck_mtx_t               *mutex,
+       uint64_t                deadline,
+       wait_interrupt_t        interruptible)
 {
-       return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
+       return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
 }