4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* #pragma ident "@(#)lockstat.c 1.11 06/03/24 SMI" */
31 #define _KERNEL /* Solaris vs. Darwin */
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/errno.h>
39 #include <sys/ioctl.h>
41 #include <sys/fcntl.h>
42 #include <miscfs/devfs/devfs.h>
44 #include <sys/dtrace.h>
45 #include <sys/dtrace_impl.h>
47 #include <sys/dtrace_glue.h>
49 #include <sys/lockstat.h>
51 #include <kern/processor.h>
53 #define membar_producer dtrace_membar_producer
56 * Hot patch values, x86
61 #define LOCKSTAT_AFRAMES 1
63 #define NOP 0x60000000
64 #define RET 0x4e800020 /* blr */
65 #define LOCKSTAT_AFRAMES 2
67 #error "not ported to this architecture"
71 typedef struct lockstat_probe
{
78 lockstat_probe_t lockstat_probes
[] =
81 /* Not implemented yet on PPC... */
82 { LS_LCK_MTX_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_LOCK_ACQUIRE
, DTRACE_IDNONE
},
83 { LS_LCK_MTX_LOCK
, LSA_SPIN
, LS_LCK_MTX_LOCK_SPIN
, DTRACE_IDNONE
},
84 { LS_LCK_MTX_TRY_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_TRY_LOCK_ACQUIRE
, DTRACE_IDNONE
},
85 { LS_LCK_MTX_TRY_SPIN_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE
, DTRACE_IDNONE
},
86 { LS_LCK_MTX_UNLOCK
, LSA_RELEASE
, LS_LCK_MTX_UNLOCK_RELEASE
, DTRACE_IDNONE
},
87 { LS_LCK_MTX_EXT_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_EXT_LOCK_ACQUIRE
, DTRACE_IDNONE
},
88 { LS_LCK_MTX_EXT_LOCK
, LSA_SPIN
, LS_LCK_MTX_EXT_LOCK_SPIN
, DTRACE_IDNONE
},
89 { LS_LCK_MTX_EXT_TRY_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE
, DTRACE_IDNONE
},
90 { LS_LCK_MTX_UNLOCK
, LSA_RELEASE
, LS_LCK_MTX_EXT_UNLOCK_RELEASE
, DTRACE_IDNONE
},
91 { LS_MUTEX_LOCK
, LSA_ACQUIRE
, LS_MUTEX_LOCK_ACQUIRE
, DTRACE_IDNONE
},
92 { LS_MUTEX_UNLOCK
, LSA_RELEASE
, LS_MUTEX_UNLOCK_RELEASE
, DTRACE_IDNONE
},
93 { LS_MUTEX_TRY_LOCK
, LSA_ACQUIRE
, LS_MUTEX_TRY_LOCK_ACQUIRE
, DTRACE_IDNONE
},
94 { LS_MUTEX_TRY_SPIN
, LSA_ACQUIRE
, LS_MUTEX_TRY_SPIN_ACQUIRE
, DTRACE_IDNONE
},
95 { LS_MUTEX_LOCK_SPIN
, LSA_ACQUIRE
, LS_MUTEX_LOCK_SPIN_ACQUIRE
, DTRACE_IDNONE
},
97 { LS_LCK_MTX_LOCK
, LSA_BLOCK
, LS_LCK_MTX_LOCK_BLOCK
, DTRACE_IDNONE
},
98 { LS_LCK_MTX_EXT_LOCK
, LSA_BLOCK
, LS_LCK_MTX_EXT_LOCK_BLOCK
, DTRACE_IDNONE
},
100 { LS_MUTEX_CONVERT_SPIN
, LSA_ACQUIRE
, LS_MUTEX_CONVERT_SPIN_ACQUIRE
, DTRACE_IDNONE
},
102 { LS_LCK_RW_LOCK_SHARED
, LSR_ACQUIRE
, LS_LCK_RW_LOCK_SHARED_ACQUIRE
, DTRACE_IDNONE
},
103 { LS_LCK_RW_LOCK_SHARED
, LSR_BLOCK
, LS_LCK_RW_LOCK_SHARED_BLOCK
, DTRACE_IDNONE
},
104 { LS_LCK_RW_LOCK_SHARED
, LSR_SPIN
, LS_LCK_RW_LOCK_SHARED_SPIN
, DTRACE_IDNONE
},
105 { LS_LCK_RW_LOCK_EXCL
, LSR_ACQUIRE
, LS_LCK_RW_LOCK_EXCL_ACQUIRE
, DTRACE_IDNONE
},
106 { LS_LCK_RW_LOCK_EXCL
, LSR_BLOCK
, LS_LCK_RW_LOCK_EXCL_BLOCK
, DTRACE_IDNONE
},
107 { LS_LCK_RW_LOCK_EXCL
, LSR_SPIN
, LS_LCK_RW_LOCK_EXCL_SPIN
, DTRACE_IDNONE
},
108 { LS_LCK_RW_DONE
, LSR_RELEASE
, LS_LCK_RW_DONE_RELEASE
, DTRACE_IDNONE
},
109 { LS_LCK_RW_TRY_LOCK_SHARED
, LSR_ACQUIRE
, LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE
, DTRACE_IDNONE
},
110 { LS_LCK_RW_TRY_LOCK_EXCL
, LSR_ACQUIRE
, LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE
, DTRACE_IDNONE
},
111 { LS_LCK_RW_LOCK_SHARED_TO_EXCL
, LSR_UPGRADE
, LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE
, DTRACE_IDNONE
},
112 { LS_LCK_RW_LOCK_SHARED_TO_EXCL
, LSR_BLOCK
, LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK
, DTRACE_IDNONE
},
113 { LS_LCK_RW_LOCK_SHARED_TO_EXCL
, LSR_SPIN
, LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN
, DTRACE_IDNONE
},
114 { LS_LCK_RW_LOCK_EXCL_TO_SHARED
, LSR_DOWNGRADE
, LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE
, DTRACE_IDNONE
},
118 /* Interlock and spinlock measurements would be nice, but later */
119 { LS_LCK_SPIN_LOCK
, LSS_ACQUIRE
, LS_LCK_SPIN_LOCK_ACQUIRE
, DTRACE_IDNONE
},
120 { LS_LCK_SPIN_LOCK
, LSS_SPIN
, LS_LCK_SPIN_LOCK_SPIN
, DTRACE_IDNONE
},
121 { LS_LCK_SPIN_UNLOCK
, LSS_RELEASE
, LS_LCK_SPIN_UNLOCK_RELEASE
, DTRACE_IDNONE
},
123 { LS_LCK_RW_LOCK_EXCL_TO_SHARED
, LSA_ILK_SPIN
, LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN
, DTRACE_IDNONE
},
124 { LS_LCK_MTX_LOCK
, LSA_ILK_SPIN
, LS_LCK_MTX_LOCK_ILK_SPIN
, DTRACE_IDNONE
},
125 { LS_LCK_MTX_EXT_LOCK
, LSA_ILK_SPIN
, LS_LCK_MTX_EXT_LOCK_ILK_SPIN
, DTRACE_IDNONE
},
126 { LS_LCK_RW_TRY_LOCK_EXCL
, LSA_ILK_SPIN
, LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN
, DTRACE_IDNONE
},
127 { LS_LCK_RW_TRY_LOCK_SHARED
, LSA_SPIN
, LS_LCK_RW_TRY_LOCK_SHARED_SPIN
, DTRACE_IDNONE
},
133 dtrace_id_t lockstat_probemap
[LS_NPROBES
];
/*
 * Assembly patch points inside the lock primitives.  Each symbol marks
 * the instruction that lockstat_hot_patch() toggles between a no-op and
 * a return, turning the corresponding probe on or off.
 *
 * Declared with (void) prototypes: empty parentheses are obsolescent
 * K&R-style declarations and disable argument checking.
 */
extern void lck_mtx_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_spin_lockstat_patch_point(void);
extern void lck_mtx_unlock_lockstat_patch_point(void);
extern void lck_mtx_unlock2_lockstat_patch_point(void);
extern void mutex_lock_lockstat_patch_point(void);
extern void mutex_unlock_lockstat_patch_point(void);
extern void mutex_unlock2_lockstat_patch_point(void);
extern void mutex_try_lockstat_patch_point(void);
extern void mutex_try_spin_lockstat_patch_point(void);
extern void mutex_lock_spin_lockstat_patch_point(void);
extern void mutex_convert_spin_lockstat_patch_point(void);
extern void lck_rw_done_lockstat_patch_point(void);
extern void lck_rw_lock_shared_lockstat_patch_point(void);
extern void lck_mtx_lock_ext_lockstat_patch_point(void);
extern void lck_mtx_ext_unlock_lockstat_patch_point(void);
152 vm_offset_t
*assembly_probes
[] = {
153 #if defined(__i386__)
155 * On x86 these points are better done via hot patches, which ensure
156 * there is zero overhead when not in use. On x86 these patch points
157 * are swapped between the return instruction and a no-op, with the
158 * Dtrace call following the return.
160 (vm_offset_t
*) lck_mtx_lock_lockstat_patch_point
,
161 (vm_offset_t
*) lck_mtx_try_lock_lockstat_patch_point
,
162 (vm_offset_t
*) lck_mtx_try_lock_spin_lockstat_patch_point
,
163 (vm_offset_t
*) lck_mtx_unlock_lockstat_patch_point
,
164 (vm_offset_t
*) lck_mtx_unlock2_lockstat_patch_point
,
165 (vm_offset_t
*) lck_rw_lock_shared_lockstat_patch_point
,
166 (vm_offset_t
*) lck_rw_done_lockstat_patch_point
,
167 (vm_offset_t
*) lck_mtx_lock_ext_lockstat_patch_point
,
168 (vm_offset_t
*) lck_mtx_ext_unlock_lockstat_patch_point
,
169 (vm_offset_t
*) mutex_lock_lockstat_patch_point
,
170 (vm_offset_t
*) mutex_try_spin_lockstat_patch_point
,
171 (vm_offset_t
*) mutex_try_lockstat_patch_point
,
172 (vm_offset_t
*) mutex_unlock_lockstat_patch_point
,
173 (vm_offset_t
*) mutex_unlock2_lockstat_patch_point
,
174 (vm_offset_t
*) mutex_lock_spin_lockstat_patch_point
,
175 (vm_offset_t
*) mutex_convert_spin_lockstat_patch_point
,
177 (vm_offset_t
*) lck_mtx_unlock_lockstat_patch_point
,
181 * Hot patch switches back and forth the probe points between NOP and RET.
182 * The argument indicates whether the probe point is on or off.
184 void lockstat_hot_patch(boolean_t active
)
186 #pragma unused(active)
190 for (i
= 0; assembly_probes
[i
]; i
++) {
193 instr
= (active
? NOP
: RET
);
194 (void) ml_nofault_copy( (vm_offset_t
)&instr
, *(assembly_probes
[i
]),
199 instr
= (active
? NOP
: RET
);
200 (void) ml_nofault_copy( (vm_offset_t
)&instr
, *(assembly_probes
[i
]), sizeof(instr
));
/*
 * Probe dispatch hook: points at dtrace_probe() while any lockstat probe
 * is enabled (see lockstat_enable()), and at the safe lockstat_stub()
 * otherwise (see lockstat_attach()).
 */
void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
211 * An initial value for lockstat_probe. See lockstat_attach(). Think safety.
214 lockstat_stub(dtrace_id_t id
, uint64_t arg0
, uint64_t arg1
,
215 uint64_t arg2
, uint64_t arg3
, uint64_t arg4
)
217 #pragma unused(id,arg0,arg1,arg2,arg3,arg4)
221 static dev_info_t
*lockstat_devi
; /* saved in xxattach() for xxinfo() */
222 static dtrace_provider_id_t lockstat_id
;
226 lockstat_enable(void *arg
, dtrace_id_t id
, void *parg
)
229 lockstat_probe_t
*probe
= parg
;
231 ASSERT(!lockstat_probemap
[probe
->lsp_probe
]);
233 lockstat_probemap
[probe
->lsp_probe
] = id
;
236 lockstat_probe
= dtrace_probe
;
239 lockstat_hot_patch(TRUE
);
246 lockstat_disable(void *arg
, dtrace_id_t id
, void *parg
)
248 #pragma unused(arg,id)
249 lockstat_probe_t
*probe
= parg
;
252 ASSERT(lockstat_probemap
[probe
->lsp_probe
]);
254 lockstat_probemap
[probe
->lsp_probe
] = 0;
255 lockstat_hot_patch(FALSE
);
259 * See if we have any probes left enabled.
261 for (i
= 0; i
< LS_NPROBES
; i
++) {
262 if (lockstat_probemap
[i
]) {
264 * This probe is still enabled. We don't need to deal
265 * with waiting for all threads to be out of the
266 * lockstat critical sections; just return.
276 lockstat_provide(void *arg
, const dtrace_probedesc_t
*desc
)
278 #pragma unused(arg,desc)
281 for (i
= 0; lockstat_probes
[i
].lsp_func
!= NULL
; i
++) {
282 lockstat_probe_t
*probe
= &lockstat_probes
[i
];
284 if (dtrace_probe_lookup(lockstat_id
, "mach_kernel",
285 probe
->lsp_func
, probe
->lsp_name
) != 0)
288 ASSERT(!probe
->lsp_id
);
289 probe
->lsp_id
= dtrace_probe_create(lockstat_id
,
290 "mach_kernel", probe
->lsp_func
, probe
->lsp_name
,
291 LOCKSTAT_AFRAMES
, probe
);
298 lockstat_destroy(void *arg
, dtrace_id_t id
, void *parg
)
300 #pragma unused(arg,id)
301 lockstat_probe_t
*probe
= parg
;
303 ASSERT(!lockstat_probemap
[probe
->lsp_probe
]);
307 static dtrace_pattr_t lockstat_attr
= {
308 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_COMMON
},
309 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
310 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
311 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_COMMON
},
312 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_COMMON
},
315 static dtrace_pops_t lockstat_pops
= {
329 lockstat_attach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
335 return (DDI_SUCCESS
);
337 return (DDI_FAILURE
);
340 if (ddi_create_minor_node(devi
, "lockstat", S_IFCHR
, 0,
341 DDI_PSEUDO
, 0) == DDI_FAILURE
||
342 dtrace_register("lockstat", &lockstat_attr
, DTRACE_PRIV_KERNEL
,
343 NULL
, &lockstat_pops
, NULL
, &lockstat_id
) != 0) {
344 ddi_remove_minor_node(devi
, NULL
);
345 return (DDI_FAILURE
);
348 ddi_report_dev(devi
);
349 lockstat_devi
= devi
;
350 lockstat_probe
= lockstat_stub
;
351 return (DDI_SUCCESS
);
354 d_open_t _lockstat_open
;
356 int _lockstat_open(dev_t dev
, int flags
, int devtype
, struct proc
*p
)
358 #pragma unused(dev,flags,devtype,p)
362 #define LOCKSTAT_MAJOR -24 /* let the kernel pick the device number */
365 * A struct describing which functions will get invoked for certain
368 static struct cdevsw lockstat_cdevsw
=
370 _lockstat_open
, /* open */
371 eno_opcl
, /* close */
372 eno_rdwrt
, /* read */
373 eno_rdwrt
, /* write */
374 eno_ioctl
, /* ioctl */
375 (stop_fcn_t
*)nulldev
, /* stop */
376 (reset_fcn_t
*)nulldev
, /* reset */
378 eno_select
, /* select */
380 eno_strat
, /* strategy */
386 static int gLockstatInited
= 0;
388 void lockstat_init( void );
390 void lockstat_init( void )
392 if (0 == gLockstatInited
)
394 int majdevno
= cdevsw_add(LOCKSTAT_MAJOR
, &lockstat_cdevsw
);
397 printf("lockstat_init: failed to allocate a major number!\n");
402 lockstat_attach( (dev_info_t
*)majdevno
, DDI_ATTACH
);
405 panic("lockstat_init: called twice!\n");
407 #undef LOCKSTAT_MAJOR