diff --git a/bsd/dev/dtrace/lockstat.c b/bsd/dev/dtrace/lockstat.c
index 82539d98bfe7efc937551a078788401da5def91f..ef1d9f1e775bd0531597c6ca081272b877b22415 100644
--- a/bsd/dev/dtrace/lockstat.c
+++ b/bsd/dev/dtrace/lockstat.c
  * CDDL HEADER END
  */
 /*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-/* #pragma ident       "@(#)lockstat.c 1.11    06/03/24 SMI" */
+/* #pragma ident       "@(#)lockstat.c 1.12    08/01/16 SMI" */
 
 
 #ifdef KERNEL
 /*
  * Hot patch values, x86
  */
-#ifdef __i386__
+#if defined(__x86_64__)
 #define        NOP     0x90
 #define        RET     0xc3
 #define LOCKSTAT_AFRAMES 1
-#elif  __ppc__
-#define        NOP     0x60000000
-#define RET    0x4e800020      /* blr */
-#define LOCKSTAT_AFRAMES 2
 #else
 #error "not ported to this architecture"
 #endif
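
The NOP/RET pair above is the entire hot-patch mechanism: each probe site is a
single byte that either returns early (probe off) or falls through to the
DTrace call (probe on). A minimal user-space sketch of the idea, assuming a
writable code byte; patch_site is a hypothetical name, and memcpy stands in
for the kernel's fault-safe ml_nofault_copy():

    #include <stdint.h>
    #include <string.h>

    #define NOP 0x90    /* fall through to the probe call */
    #define RET 0xc3    /* return early, skipping the probe */

    /* Toggle one patch-point byte between "probe on" and "probe off". */
    static void patch_site(uint8_t *site, int active)
    {
        uint8_t instr = active ? NOP : RET;
        memcpy(site, &instr, sizeof(instr));    /* kernel: ml_nofault_copy() */
    }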
 
-
 typedef struct lockstat_probe {
        const char      *lsp_func;
        const char      *lsp_name;
@@ -77,28 +72,20 @@ typedef struct lockstat_probe {
 
 lockstat_probe_t lockstat_probes[] =
 {
-#ifdef __i386__
-       /* Not implemented yet on PPC... */
+#if defined(__x86_64__)
+       /* Only provide probes that are implemented on each architecture */
        { LS_LCK_MTX_LOCK,      LSA_ACQUIRE,    LS_LCK_MTX_LOCK_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_MTX_LOCK,      LSA_SPIN,       LS_LCK_MTX_LOCK_SPIN, DTRACE_IDNONE },
+       { LS_LCK_MTX_LOCK,      LSA_BLOCK,      LS_LCK_MTX_LOCK_BLOCK, DTRACE_IDNONE }, 
        { LS_LCK_MTX_TRY_LOCK,  LSA_ACQUIRE,    LS_LCK_MTX_TRY_LOCK_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_MTX_TRY_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_MTX_UNLOCK,    LSA_RELEASE,    LS_LCK_MTX_UNLOCK_RELEASE, DTRACE_IDNONE },
        { LS_LCK_MTX_EXT_LOCK,  LSA_ACQUIRE,    LS_LCK_MTX_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_MTX_EXT_LOCK,  LSA_SPIN,       LS_LCK_MTX_EXT_LOCK_SPIN, DTRACE_IDNONE },
-       { LS_LCK_MTX_EXT_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
-       { LS_LCK_MTX_UNLOCK,    LSA_RELEASE,    LS_LCK_MTX_EXT_UNLOCK_RELEASE, DTRACE_IDNONE },
-       { LS_MUTEX_LOCK,        LSA_ACQUIRE,    LS_MUTEX_LOCK_ACQUIRE, DTRACE_IDNONE },
-       { LS_MUTEX_UNLOCK,      LSA_RELEASE,    LS_MUTEX_UNLOCK_RELEASE, DTRACE_IDNONE },
-       { LS_MUTEX_TRY_LOCK,    LSA_ACQUIRE,    LS_MUTEX_TRY_LOCK_ACQUIRE, DTRACE_IDNONE },
-       { LS_MUTEX_TRY_SPIN,    LSA_ACQUIRE,    LS_MUTEX_TRY_SPIN_ACQUIRE, DTRACE_IDNONE },
-       { LS_MUTEX_LOCK_SPIN,   LSA_ACQUIRE,    LS_MUTEX_LOCK_SPIN_ACQUIRE, DTRACE_IDNONE },
-#endif
-       { LS_LCK_MTX_LOCK,      LSA_BLOCK,      LS_LCK_MTX_LOCK_BLOCK, DTRACE_IDNONE },
        { LS_LCK_MTX_EXT_LOCK,  LSA_BLOCK,      LS_LCK_MTX_EXT_LOCK_BLOCK, DTRACE_IDNONE },
-
-       { LS_MUTEX_CONVERT_SPIN,        LSA_ACQUIRE,    LS_MUTEX_CONVERT_SPIN_ACQUIRE, DTRACE_IDNONE },
-
+//     { LS_LCK_MTX_EXT_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },       
+       { LS_LCK_MTX_EXT_UNLOCK,   LSA_RELEASE, LS_LCK_MTX_EXT_UNLOCK_RELEASE, DTRACE_IDNONE },
+       { LS_LCK_MTX_LOCK_SPIN_LOCK,    LSA_ACQUIRE,    LS_LCK_MTX_LOCK_SPIN_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_RW_LOCK_SHARED,        LSR_ACQUIRE,    LS_LCK_RW_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_RW_LOCK_SHARED,        LSR_BLOCK,      LS_LCK_RW_LOCK_SHARED_BLOCK, DTRACE_IDNONE },
        { LS_LCK_RW_LOCK_SHARED,        LSR_SPIN,       LS_LCK_RW_LOCK_SHARED_SPIN, DTRACE_IDNONE },
@@ -109,11 +96,10 @@ lockstat_probe_t lockstat_probes[] =
        { LS_LCK_RW_TRY_LOCK_SHARED,    LSR_ACQUIRE,    LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_RW_TRY_LOCK_EXCL,      LSR_ACQUIRE,    LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, DTRACE_IDNONE },
        { LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_UPGRADE,   LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, DTRACE_IDNONE },
-       { LS_LCK_RW_LOCK_SHARED_TO_EXCL,        LSR_BLOCK,      LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, DTRACE_IDNONE },
        { LS_LCK_RW_LOCK_SHARED_TO_EXCL,        LSR_SPIN,       LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, DTRACE_IDNONE },
+       { LS_LCK_RW_LOCK_SHARED_TO_EXCL,        LSR_BLOCK,      LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, DTRACE_IDNONE },   
        { LS_LCK_RW_LOCK_EXCL_TO_SHARED,        LSR_DOWNGRADE,  LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, DTRACE_IDNONE },
-
-
+#endif
 #ifdef LATER
        /* Interlock and spinlock measurements would be nice, but later */
        { LS_LCK_SPIN_LOCK,     LSS_ACQUIRE,    LS_LCK_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
@@ -127,105 +113,124 @@ lockstat_probe_t lockstat_probes[] =
        { LS_LCK_RW_TRY_LOCK_SHARED,    LSA_SPIN,       LS_LCK_RW_TRY_LOCK_SHARED_SPIN, DTRACE_IDNONE },
 #endif
 
-       { NULL }
+       { NULL, NULL, 0, 0 }
 };
 
 dtrace_id_t lockstat_probemap[LS_NPROBES];
 
-extern void lck_mtx_lock_lockstat_patch_point();
-extern void lck_mtx_try_lock_lockstat_patch_point();
-extern void lck_mtx_try_lock_spin_lockstat_patch_point();
-extern void lck_mtx_unlock_lockstat_patch_point();
-extern void lck_mtx_unlock2_lockstat_patch_point();
-extern void mutex_lock_lockstat_patch_point();
-extern void mutex_unlock_lockstat_patch_point();
-extern void mutex_unlock2_lockstat_patch_point();
-extern void mutex_try_lockstat_patch_point();
-extern void mutex_try_spin_lockstat_patch_point();
-extern void mutex_lock_spin_lockstat_patch_point();
-extern void mutex_convert_spin_lockstat_patch_point();
-extern void lck_rw_done_lockstat_patch_point();
-extern void lck_rw_lock_shared_lockstat_patch_point();
-extern void lck_mtx_lock_ext_lockstat_patch_point();
-extern void lck_mtx_ext_unlock_lockstat_patch_point();
-
-vm_offset_t *assembly_probes[] = {
-#if    defined(__i386__)
-       /*
-        * On x86 these points are better done via hot patches, which ensure
-        * there is zero overhead when not in use.  On x86 these patch points
-        * are swapped between the return instruction and a no-op, with the
-        * Dtrace call following the return.
-        */ 
-       (vm_offset_t *) lck_mtx_lock_lockstat_patch_point,
-       (vm_offset_t *) lck_mtx_try_lock_lockstat_patch_point,
-       (vm_offset_t *) lck_mtx_try_lock_spin_lockstat_patch_point,
-       (vm_offset_t *) lck_mtx_unlock_lockstat_patch_point,
-       (vm_offset_t *) lck_mtx_unlock2_lockstat_patch_point,
-       (vm_offset_t *) lck_rw_lock_shared_lockstat_patch_point,
-       (vm_offset_t *) lck_rw_done_lockstat_patch_point,
-       (vm_offset_t *) lck_mtx_lock_ext_lockstat_patch_point,
-       (vm_offset_t *) lck_mtx_ext_unlock_lockstat_patch_point,
-       (vm_offset_t *) mutex_lock_lockstat_patch_point,
-       (vm_offset_t *) mutex_try_spin_lockstat_patch_point,
-       (vm_offset_t *) mutex_try_lockstat_patch_point,
-       (vm_offset_t *) mutex_unlock_lockstat_patch_point,
-       (vm_offset_t *) mutex_unlock2_lockstat_patch_point,
-       (vm_offset_t *) mutex_lock_spin_lockstat_patch_point,
-       (vm_offset_t *) mutex_convert_spin_lockstat_patch_point,
+#if CONFIG_DTRACE
+#if defined(__x86_64__)
+extern void lck_mtx_lock_lockstat_patch_point(void);
+extern void lck_mtx_try_lock_lockstat_patch_point(void);
+extern void lck_mtx_try_lock_spin_lockstat_patch_point(void);
+extern void lck_mtx_unlock_lockstat_patch_point(void);
+extern void lck_mtx_lock_ext_lockstat_patch_point(void);
+extern void lck_mtx_ext_unlock_lockstat_patch_point(void);
+extern void lck_rw_lock_shared_lockstat_patch_point(void);
+extern void lck_rw_lock_exclusive_lockstat_patch_point(void);
+extern void lck_rw_lock_shared_to_exclusive_lockstat_patch_point(void);
+extern void lck_rw_try_lock_shared_lockstat_patch_point(void);
+extern void lck_rw_try_lock_exclusive_lockstat_patch_point(void);
+extern void lck_mtx_lock_spin_lockstat_patch_point(void);
 #endif
-       (vm_offset_t *) lck_mtx_unlock_lockstat_patch_point,
-       NULL
+
+#endif /* CONFIG_DTRACE */
+
+typedef struct lockstat_assembly_probe {
+       int lsap_probe;
+       vm_offset_t * lsap_patch_point;
+} lockstat_assembly_probe_t;
+
+
+lockstat_assembly_probe_t assembly_probes[] =
+{
+#if CONFIG_DTRACE
+#if defined(__x86_64__)
+               /*
+                * On x86 these probes are better implemented as hot patches, which
+                * ensure zero overhead when not in use: each patch point is swapped
+                * between a return instruction and a no-op, with the DTrace call
+                * following the return.
+                */
+               { LS_LCK_MTX_LOCK_ACQUIRE,              (vm_offset_t *) lck_mtx_lock_lockstat_patch_point },
+               { LS_LCK_MTX_TRY_LOCK_ACQUIRE,          (vm_offset_t *) lck_mtx_try_lock_lockstat_patch_point },
+               { LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE,     (vm_offset_t *) lck_mtx_try_lock_spin_lockstat_patch_point },
+               { LS_LCK_MTX_UNLOCK_RELEASE,            (vm_offset_t *) lck_mtx_unlock_lockstat_patch_point },
+               { LS_LCK_MTX_EXT_LOCK_ACQUIRE,          (vm_offset_t *) lck_mtx_lock_ext_lockstat_patch_point },
+               { LS_LCK_MTX_EXT_UNLOCK_RELEASE,        (vm_offset_t *) lck_mtx_ext_unlock_lockstat_patch_point },
+               { LS_LCK_RW_LOCK_SHARED_ACQUIRE,        (vm_offset_t *) lck_rw_lock_shared_lockstat_patch_point },
+               { LS_LCK_RW_LOCK_EXCL_ACQUIRE,          (vm_offset_t *) lck_rw_lock_exclusive_lockstat_patch_point },
+               { LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,(vm_offset_t *) lck_rw_lock_shared_to_exclusive_lockstat_patch_point },
+               { LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,    (vm_offset_t *) lck_rw_try_lock_shared_lockstat_patch_point },
+               { LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,      (vm_offset_t *) lck_rw_try_lock_exclusive_lockstat_patch_point },
+               { LS_LCK_MTX_LOCK_SPIN_ACQUIRE,         (vm_offset_t *) lck_mtx_lock_spin_lockstat_patch_point },
+#endif
+               /* No assembly patch points for ARM */
+#endif /* CONFIG_DTRACE */
+               { LS_LCK_INVALID, NULL }
 };
+
+
 /*
- * Hot patch switches back and forth the probe points between NOP and RET.
- * The argument indicates whether the probe point is on or off.
+ * APPLE NOTE:
+ * Hot patching manipulates probe points by swapping between
+ * no-op and return instructions.
+ * The active flag indicates whether the probe point is being turned on or off:
+ *     on == plant a NOP and thus fall through to the probe call
+ *     off == plant a RET and thus avoid the probe call completely
+ * The ls_probe argument identifies which probe to patch.
  */
-void lockstat_hot_patch(boolean_t active)
+static
+void lockstat_hot_patch(boolean_t active, int ls_probe)
 {
 #pragma unused(active)
        int i;
 
-
-       for (i = 0; assembly_probes[i]; i++) {
-#ifdef __i386__
-               uint8_t instr;
-               instr = (active ? NOP : RET );
-               (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i]), 
+       /*
+        * Loop through the entire table, in case there are
+        * multiple patch points per probe.
+        */
+       for (i = 0; assembly_probes[i].lsap_patch_point; i++) {
+               if (ls_probe == assembly_probes[i].lsap_probe)
+#if defined(__x86_64__)
+               {                       
+                       uint8_t instr;
+                       instr = (active ? NOP : RET );
+                       (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i].lsap_patch_point), 
                                                                sizeof(instr));
+               }
 #endif
-#ifdef __ppc__
-               uint32_t instr;
-               instr = (active ? NOP : RET );
-               (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i]), sizeof(instr));
-#endif
-       }
+       } /* for */
 }
 
-
-
 void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t,
                                    uint64_t, uint64_t, uint64_t);
 
+
 /*
- * An initial value for lockstat_probe. See lockstat_attach(). Think safety.
+ * APPLE NOTE:
+ * This wrapper is used only by the assembly hot-patched probes.
  */
-static void
-lockstat_stub(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
-                                   uint64_t arg2, uint64_t arg3, uint64_t arg4)
+void
+lockstat_probe_wrapper(int probe, uintptr_t lp, int rwflag)
 {
-#pragma unused(id,arg0,arg1,arg2,arg3,arg4)
+       dtrace_id_t id;
+       id = lockstat_probemap[probe];
+       if (id != 0)
+       {
+               (*lockstat_probe)(id, (uintptr_t)lp, (uint64_t)rwflag, 0,0,0);
+       }
 }
 
-
 static dev_info_t      *lockstat_devi; /* saved in xxattach() for xxinfo() */
 static dtrace_provider_id_t lockstat_id;
 
 /*ARGSUSED*/
-static void
+static int
 lockstat_enable(void *arg, dtrace_id_t id, void *parg)
 {
-#pragma unused(arg)
+#pragma unused(arg) /* __APPLE__ */
+    
        lockstat_probe_t *probe = parg;
 
        ASSERT(!lockstat_probemap[probe->lsp_probe]);
@@ -233,11 +238,9 @@ lockstat_enable(void *arg, dtrace_id_t id, void *parg)
        lockstat_probemap[probe->lsp_probe] = id;
        membar_producer();
 
-       lockstat_probe = dtrace_probe;
-       membar_producer();
-
-       lockstat_hot_patch(TRUE);
+       lockstat_hot_patch(TRUE, probe->lsp_probe);
        membar_producer();
+       return(0);
 
 }
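
The store ordering in lockstat_enable() above is deliberate: the probe id is
published to lockstat_probemap[] and fenced with membar_producer() before the
NOP is planted, so a CPU falling through the freshly enabled patch point can
never read a stale id of 0. A standalone sketch of that pattern; probemap,
store_fence, hot_patch and enable_probe are hypothetical stand-ins for the
kernel symbols:

    #include <stdint.h>

    typedef uint32_t dtrace_id_t;

    static dtrace_id_t probemap[32];            /* lockstat_probemap stand-in */

    static void store_fence(void)               /* membar_producer() stand-in */
    {
        __atomic_thread_fence(__ATOMIC_RELEASE);
    }

    static void hot_patch(int on, int probe)    /* patching elided here */
    {
        (void)on; (void)probe;
    }

    static int enable_probe(int probe, dtrace_id_t id)
    {
        probemap[probe] = id;   /* 1. publish the id              */
        store_fence();          /* 2. make it globally visible... */
        hot_patch(1, probe);    /* 3. ...before opening the gate  */
        store_fence();
        return 0;
    }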
 
@@ -245,14 +248,15 @@ lockstat_enable(void *arg, dtrace_id_t id, void *parg)
 static void
 lockstat_disable(void *arg, dtrace_id_t id, void *parg)
 {
-#pragma unused(arg,id)
+#pragma unused(arg, id) /* __APPLE__ */
+
        lockstat_probe_t *probe = parg;
        int i;
 
        ASSERT(lockstat_probemap[probe->lsp_probe]);
 
        lockstat_probemap[probe->lsp_probe] = 0;
-       lockstat_hot_patch(FALSE);
+       lockstat_hot_patch(FALSE, probe->lsp_probe);
        membar_producer();
 
        /*
@@ -275,7 +279,8 @@ lockstat_disable(void *arg, dtrace_id_t id, void *parg)
 static void
 lockstat_provide(void *arg, const dtrace_probedesc_t *desc)
 {
-#pragma unused(arg,desc)
+#pragma unused(arg, desc) /* __APPLE__ */
+    
        int i = 0;
 
        for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
@@ -297,7 +302,8 @@ lockstat_provide(void *arg, const dtrace_probedesc_t *desc)
 static void
 lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
 {
-#pragma unused(arg,id)
+#pragma unused(arg, id) /* __APPLE__ */
+    
        lockstat_probe_t *probe = parg;
 
        ASSERT(!lockstat_probemap[probe->lsp_probe]);
@@ -345,9 +351,11 @@ lockstat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
                return (DDI_FAILURE);
        }
 
+       lockstat_probe = dtrace_probe;
+       membar_producer();
+
        ddi_report_dev(devi);
        lockstat_devi = devi;
-       lockstat_probe = lockstat_stub;
        return (DDI_SUCCESS);
 }
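
With the stub removed, lockstat_probe stays NULL until lockstat_attach() wires
it to dtrace_probe, and the assembly patch points reach DTrace through
lockstat_probe_wrapper(), which does nothing while a probe's map entry is 0.
A standalone sketch of that fire path; probemap, probe_fn and probe_wrapper
are hypothetical stand-ins:

    #include <stdint.h>

    typedef uint32_t dtrace_id_t;

    static dtrace_id_t probemap[32];                /* lockstat_probemap */
    static void (*probe_fn)(dtrace_id_t, uint64_t, uint64_t,
                            uint64_t, uint64_t, uint64_t);  /* dtrace_probe */

    /* An enabled patch point falls through its NOP into a call like this. */
    static void probe_wrapper(int probe, uintptr_t lock, int rwflag)
    {
        dtrace_id_t id = probemap[probe];

        if (id != 0)    /* 0 means the probe is disabled */
            (*probe_fn)(id, (uint64_t)lock, (uint64_t)rwflag, 0, 0, 0);
    }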
 
@@ -399,7 +407,7 @@ void lockstat_init( void )
                        return;
                }
 
-               lockstat_attach( (dev_info_t    *)majdevno, DDI_ATTACH );
+               lockstat_attach( (dev_info_t    *)(uintptr_t)majdevno, DDI_ATTACH );
                gLockstatInited = 1;
        } else
                panic("lockstat_init: called twice!\n");
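
The added (uintptr_t) cast in the final hunk is the usual LP64 idiom: majdevno
is an int, and converting a 32-bit integer directly to a 64-bit pointer type
draws an int-to-pointer-size warning, so the value is widened explicitly
first. A minimal illustration; cookie_from_major is a hypothetical name:

    #include <stdint.h>

    /* Widen the integer before the pointer cast; a direct (void *)majdevno
     * cast would provoke a warning on LP64 targets. */
    static void *cookie_from_major(int majdevno)
    {
        return (void *)(uintptr_t)majdevno;
    }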