/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/* #pragma ident	"@(#)lockstat.c	1.12	08/01/16 SMI" */
#define _KERNEL /* Solaris vs. Darwin */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/stat.h>		/* S_IFCHR */
#include <sys/ioctl.h>
#include <sys/conf.h>		/* struct cdevsw, cdevsw_add() */
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>

#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>

#include <sys/dtrace_glue.h>

#include <sys/lockstat.h>

#include <kern/processor.h>

#define membar_producer dtrace_membar_producer
/*
 * Hot patch values, x86
 */
#if defined(__i386__) || defined(__x86_64__)
#define NOP	0x90	/* single-byte x86 no-op */
#define RET	0xc3	/* single-byte x86 near return */
#define LOCKSTAT_AFRAMES 1
#else
#error "not ported to this architecture"
#endif
typedef struct lockstat_probe {
	const char	*lsp_func;	/* function-name component of the probe */
	const char	*lsp_name;	/* name component of the probe */
	int		lsp_probe;	/* static LS_* index into lockstat_probemap */
	dtrace_id_t	lsp_id;		/* id assigned by dtrace_probe_create() */
} lockstat_probe_t;

lockstat_probe_t lockstat_probes[] =
{
#if defined(__i386__) || defined(__x86_64__)
	/* Not implemented yet on PPC... */
	{ LS_LCK_MTX_LOCK,		LSA_ACQUIRE,	LS_LCK_MTX_LOCK_ACQUIRE,		DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK,		LSA_SPIN,	LS_LCK_MTX_LOCK_SPIN,			DTRACE_IDNONE },
	{ LS_LCK_MTX_TRY_LOCK,		LSA_ACQUIRE,	LS_LCK_MTX_TRY_LOCK_ACQUIRE,		DTRACE_IDNONE },
	{ LS_LCK_MTX_TRY_SPIN_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE,	DTRACE_IDNONE },
	{ LS_LCK_MTX_UNLOCK,		LSA_RELEASE,	LS_LCK_MTX_UNLOCK_RELEASE,		DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,		LSA_ACQUIRE,	LS_LCK_MTX_EXT_LOCK_ACQUIRE,		DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,		LSA_SPIN,	LS_LCK_MTX_EXT_LOCK_SPIN,		DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_TRY_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE,	DTRACE_IDNONE },
	{ LS_LCK_MTX_UNLOCK,		LSA_RELEASE,	LS_LCK_MTX_EXT_UNLOCK_RELEASE,		DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK_SPIN_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_LOCK_SPIN_ACQUIRE,		DTRACE_IDNONE },

	{ LS_LCK_MTX_LOCK,		LSA_BLOCK,	LS_LCK_MTX_LOCK_BLOCK,			DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,		LSA_BLOCK,	LS_LCK_MTX_EXT_LOCK_BLOCK,		DTRACE_IDNONE },

	{ LS_LCK_RW_LOCK_SHARED,	LSR_ACQUIRE,	LS_LCK_RW_LOCK_SHARED_ACQUIRE,		DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED,	LSR_BLOCK,	LS_LCK_RW_LOCK_SHARED_BLOCK,		DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED,	LSR_SPIN,	LS_LCK_RW_LOCK_SHARED_SPIN,		DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL,		LSR_ACQUIRE,	LS_LCK_RW_LOCK_EXCL_ACQUIRE,		DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL,		LSR_BLOCK,	LS_LCK_RW_LOCK_EXCL_BLOCK,		DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL,		LSR_SPIN,	LS_LCK_RW_LOCK_EXCL_SPIN,		DTRACE_IDNONE },
	{ LS_LCK_RW_DONE,		LSR_RELEASE,	LS_LCK_RW_DONE_RELEASE,			DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_SHARED,	LSR_ACQUIRE,	LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,	DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_EXCL,	LSR_ACQUIRE,	LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,	DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_UPGRADE,	LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,	DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_BLOCK,	LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK,	DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_SPIN,	LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN,	DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSR_DOWNGRADE,	LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, DTRACE_IDNONE },

#ifdef LATER
	/* Interlock and spinlock measurements would be nice, but later */
	{ LS_LCK_SPIN_LOCK,		LSS_ACQUIRE,	LS_LCK_SPIN_LOCK_ACQUIRE,		DTRACE_IDNONE },
	{ LS_LCK_SPIN_LOCK,		LSS_SPIN,	LS_LCK_SPIN_LOCK_SPIN,			DTRACE_IDNONE },
	{ LS_LCK_SPIN_UNLOCK,		LSS_RELEASE,	LS_LCK_SPIN_UNLOCK_RELEASE,		DTRACE_IDNONE },

	{ LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSA_ILK_SPIN,	LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN,	DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK,		LSA_ILK_SPIN,	LS_LCK_MTX_LOCK_ILK_SPIN,		DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,		LSA_ILK_SPIN,	LS_LCK_MTX_EXT_LOCK_ILK_SPIN,		DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_EXCL,	LSA_ILK_SPIN,	LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN,	DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_SHARED,	LSA_SPIN,	LS_LCK_RW_TRY_LOCK_SHARED_SPIN,		DTRACE_IDNONE },
#endif /* LATER */
#endif /* defined(__i386__) || defined(__x86_64__) */
	{ NULL, NULL, 0, 0 }	/* terminates the scan in lockstat_provide() */
};
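/*
 * lockstat_probemap[] maps each static LS_* probe index to the DTrace
 * probe id assigned by dtrace_probe_create(); a zero entry means the
 * corresponding probe is not enabled.
 */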
dtrace_id_t lockstat_probemap[LS_NPROBES];
#if CONFIG_DTRACE
extern void lck_mtx_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_spin_lockstat_patch_point(void);
extern void lck_mtx_unlock_lockstat_patch_point(void);
extern void lck_mtx_lock_ext_lockstat_patch_point(void);
extern void lck_mtx_ext_unlock_lockstat_patch_point(void);

extern void lck_rw_lock_shared_lockstat_patch_point(void);
extern void lck_rw_lock_exclusive_lockstat_patch_point(void);
extern void lck_rw_lock_shared_to_exclusive_lockstat_patch_point(void);
extern void lck_rw_try_lock_shared_lockstat_patch_point(void);
extern void lck_rw_try_lock_exclusive_lockstat_patch_point(void);
extern void lck_mtx_lock_spin_lockstat_patch_point(void);
#endif /* CONFIG_DTRACE */
vm_offset_t *assembly_probes[] = {
#if defined(__i386__) || defined(__x86_64__)
	/*
	 * On x86 these points are better done via hot patches, which ensure
	 * zero overhead when not in use. Each patch point is swapped between
	 * the return instruction and a no-op, with the DTrace call following
	 * the return.
	 */
	(vm_offset_t *) lck_mtx_lock_lockstat_patch_point,
	(vm_offset_t *) lck_mtx_try_lock_lockstat_patch_point,
	(vm_offset_t *) lck_mtx_try_lock_spin_lockstat_patch_point,
	(vm_offset_t *) lck_mtx_unlock_lockstat_patch_point,
	(vm_offset_t *) lck_mtx_lock_ext_lockstat_patch_point,
	(vm_offset_t *) lck_mtx_ext_unlock_lockstat_patch_point,
	(vm_offset_t *) lck_rw_lock_shared_lockstat_patch_point,
	(vm_offset_t *) lck_rw_lock_exclusive_lockstat_patch_point,
	(vm_offset_t *) lck_rw_lock_shared_to_exclusive_lockstat_patch_point,
	(vm_offset_t *) lck_rw_try_lock_shared_lockstat_patch_point,
	(vm_offset_t *) lck_rw_try_lock_exclusive_lockstat_patch_point,
	(vm_offset_t *) lck_mtx_lock_spin_lockstat_patch_point,

	(vm_offset_t *) lck_mtx_unlock_lockstat_patch_point,
#endif /* defined(__i386__) || defined(__x86_64__) */
	NULL			/* terminates the scan in lockstat_hot_patch() */
};
/*
 * Hot patching switches the probe points back and forth between a NOP and
 * a RET instruction. The argument indicates whether the probe points are
 * being turned on or off.
 */
#if defined(__APPLE__)
static
#endif /* __APPLE__ */
void lockstat_hot_patch(boolean_t active)
{
#pragma unused(active)	/* unused on non-x86, where the loop body is empty */
	int i;

	for (i = 0; assembly_probes[i]; i++) {
#if defined(__i386__) || defined(__x86_64__)
		uint8_t instr;

		/* NOP falls through to the DTrace call; RET returns early. */
		instr = (active ? NOP : RET);
		(void) ml_nofault_copy((vm_offset_t)&instr, *(assembly_probes[i]),
		    sizeof(instr));
#endif
	}
}
void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);

static dev_info_t	*lockstat_devi;	/* saved in xxattach() for xxinfo() */
static dtrace_provider_id_t lockstat_id;
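/*
 * Illustrative sketch, not from the original source: roughly what the
 * instrumentation sequence behind each patch point does once the hot patch
 * above has swapped the RET for a NOP. The helper name is hypothetical,
 * and LS_LCK_MTX_LOCK_ACQUIRE stands in for whichever LS_* index a real
 * probe site uses.
 */
static inline void
lockstat_probe_site_sketch(uintptr_t lock_addr)
{
	/* A zero probemap entry means this probe was never enabled. */
	dtrace_id_t probeid = lockstat_probemap[LS_LCK_MTX_LOCK_ACQUIRE];

	if (probeid != 0 && lockstat_probe != NULL)
		(*lockstat_probe)(probeid, lock_addr, 0, 0, 0, 0);
}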
/*ARGSUSED*/
static int
lockstat_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg) /* __APPLE__ */
	lockstat_probe_t *probe = parg;

	ASSERT(!lockstat_probemap[probe->lsp_probe]);

	/* Publish the probe id before the patch points go live. */
	lockstat_probemap[probe->lsp_probe] = id;
	membar_producer();

	lockstat_hot_patch(TRUE);
	membar_producer();

	return (0);
}
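/*
 * The id recorded above is the one dtrace_probe_create() handed back in
 * lockstat_provide(); it is what a patched-in probe site ultimately passes
 * to (*lockstat_probe)(). lockstat_disable() below reverses the bookkeeping.
 */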
/*ARGSUSED*/
static void
lockstat_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id) /* __APPLE__ */
	lockstat_probe_t *probe = parg;
	int i;

	ASSERT(lockstat_probemap[probe->lsp_probe]);

	lockstat_probemap[probe->lsp_probe] = 0;
	/*
	 * Note: the hot patch is all-or-nothing; this writes RET at every
	 * patch point, not just those belonging to this probe.
	 */
	lockstat_hot_patch(FALSE);
	membar_producer();

	/*
	 * See if we have any probes left enabled.
	 */
	for (i = 0; i < LS_NPROBES; i++) {
		if (lockstat_probemap[i]) {
			/*
			 * This probe is still enabled. We don't need to deal
			 * with waiting for all threads to be out of the
			 * lockstat critical sections; just return.
			 */
			return;
		}
	}
}
/*ARGSUSED*/
static void
lockstat_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc) /* __APPLE__ */
	int i;

	for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
		lockstat_probe_t *probe = &lockstat_probes[i];

		/* Skip probes that have already been created. */
		if (dtrace_probe_lookup(lockstat_id, "mach_kernel",
		    probe->lsp_func, probe->lsp_name) != 0)
			continue;

		ASSERT(!probe->lsp_id);
		probe->lsp_id = dtrace_probe_create(lockstat_id,
		    "mach_kernel", probe->lsp_func, probe->lsp_name,
		    LOCKSTAT_AFRAMES, probe);
	}
}
/*ARGSUSED*/
static void
lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id) /* __APPLE__ */
	lockstat_probe_t *probe = parg;

	ASSERT(!lockstat_probemap[probe->lsp_probe]);
	probe->lsp_id = 0;
}
static dtrace_pattr_t lockstat_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
static dtrace_pops_t lockstat_pops = {
	lockstat_provide,	/* dtps_provide */
	NULL,			/* dtps_provide_module */
	lockstat_enable,	/* dtps_enable */
	lockstat_disable,	/* dtps_disable */
	NULL,			/* dtps_suspend */
	NULL,			/* dtps_resume */
	NULL,			/* dtps_getargdesc */
	NULL,			/* dtps_getargval */
	NULL,			/* dtps_usermode */
	lockstat_destroy	/* dtps_destroy */
};
static int
lockstat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "lockstat", S_IFCHR, 0,
	    DDI_PSEUDO, 0) == DDI_FAILURE ||
	    dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_KERNEL,
	    NULL, &lockstat_pops, NULL, &lockstat_id) != 0) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}

	/* Point the patched-in probe sites at the DTrace core. */
	lockstat_probe = dtrace_probe;
	membar_producer();

	ddi_report_dev(devi);
	lockstat_devi = devi;
	return (DDI_SUCCESS);
}
d_open_t _lockstat_open;

int _lockstat_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return 0;
}
#define LOCKSTAT_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw lockstat_cdevsw =
{
	_lockstat_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	eno_ioctl,		/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
static int gLockstatInited = 0;

void lockstat_init( void );

void lockstat_init( void )
{
	if (0 == gLockstatInited)
	{
		int majdevno = cdevsw_add(LOCKSTAT_MAJOR, &lockstat_cdevsw);

		if (majdevno < 0) {
			printf("lockstat_init: failed to allocate a major number!\n");
			return;
		}

		lockstat_attach( (dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH );
		gLockstatInited = 1;
	} else
		panic("lockstat_init: called twice!\n");
}

#undef LOCKSTAT_MAJOR
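/*
 * Usage sketch (the caller is an assumption, not named in this file): the
 * DTrace bring-up path is expected to invoke lockstat_init() exactly once
 * during boot, after which the provider is registered and the /dev node
 * exists; a second call panics by design.
 */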