/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* #pragma ident "@(#)lockstat.c 1.11 06/03/24 SMI" */

#define _KERNEL /* Solaris vs. Darwin */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>

#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>

#include <sys/dtrace_glue.h>

#include <sys/lockstat.h>

#include <kern/processor.h>

#define membar_producer dtrace_membar_producer

/*
 * Hot patch values, x86
 */
#if defined(__i386__)
#define NOP 0x90
#define RET 0xc3
#define LOCKSTAT_AFRAMES 1
#elif defined(__ppc__)
#define NOP 0x60000000
#define RET 0x4e800020 /* blr */
#define LOCKSTAT_AFRAMES 2
#else
#error "not ported to this architecture"
#endif

typedef struct lockstat_probe {
    const char	*lsp_func;
    const char	*lsp_name;
    int		lsp_probe;
    dtrace_id_t	lsp_id;
} lockstat_probe_t;

lockstat_probe_t lockstat_probes[] =
{
    /* Not implemented yet on PPC... */
    { LS_LCK_MTX_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_MTX_LOCK, LSA_SPIN, LS_LCK_MTX_LOCK_SPIN, DTRACE_IDNONE },
    { LS_LCK_MTX_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_MTX_TRY_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_MTX_UNLOCK, LSA_RELEASE, LS_LCK_MTX_UNLOCK_RELEASE, DTRACE_IDNONE },
    { LS_LCK_MTX_EXT_LOCK, LSA_ACQUIRE, LS_LCK_MTX_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_MTX_EXT_LOCK, LSA_SPIN, LS_LCK_MTX_EXT_LOCK_SPIN, DTRACE_IDNONE },
    { LS_LCK_MTX_EXT_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_MTX_UNLOCK, LSA_RELEASE, LS_LCK_MTX_EXT_UNLOCK_RELEASE, DTRACE_IDNONE },
    { LS_MUTEX_LOCK, LSA_ACQUIRE, LS_MUTEX_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_MUTEX_UNLOCK, LSA_RELEASE, LS_MUTEX_UNLOCK_RELEASE, DTRACE_IDNONE },
    { LS_MUTEX_TRY_LOCK, LSA_ACQUIRE, LS_MUTEX_TRY_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_MUTEX_TRY_SPIN, LSA_ACQUIRE, LS_MUTEX_TRY_SPIN_ACQUIRE, DTRACE_IDNONE },
    { LS_MUTEX_LOCK_SPIN, LSA_ACQUIRE, LS_MUTEX_LOCK_SPIN_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_MTX_LOCK, LSA_BLOCK, LS_LCK_MTX_LOCK_BLOCK, DTRACE_IDNONE },
    { LS_LCK_MTX_EXT_LOCK, LSA_BLOCK, LS_LCK_MTX_EXT_LOCK_BLOCK, DTRACE_IDNONE },
    { LS_MUTEX_CONVERT_SPIN, LSA_ACQUIRE, LS_MUTEX_CONVERT_SPIN_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_SHARED, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_BLOCK, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_SHARED, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_SPIN, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_LOCK_EXCL_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_EXCL_BLOCK, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_EXCL_SPIN, DTRACE_IDNONE },
    { LS_LCK_RW_DONE, LSR_RELEASE, LS_LCK_RW_DONE_RELEASE, DTRACE_IDNONE },
    { LS_LCK_RW_TRY_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_RW_TRY_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_UPGRADE, LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, DTRACE_IDNONE },
    { LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSR_DOWNGRADE, LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, DTRACE_IDNONE },

    /* Interlock and spinlock measurements would be nice, but later */
    { LS_LCK_SPIN_LOCK, LSS_ACQUIRE, LS_LCK_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
    { LS_LCK_SPIN_LOCK, LSS_SPIN, LS_LCK_SPIN_LOCK_SPIN, DTRACE_IDNONE },
    { LS_LCK_SPIN_UNLOCK, LSS_RELEASE, LS_LCK_SPIN_UNLOCK_RELEASE, DTRACE_IDNONE },

    { LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSA_ILK_SPIN, LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN, DTRACE_IDNONE },
    { LS_LCK_MTX_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_LOCK_ILK_SPIN, DTRACE_IDNONE },
    { LS_LCK_MTX_EXT_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_EXT_LOCK_ILK_SPIN, DTRACE_IDNONE },
    { LS_LCK_RW_TRY_LOCK_EXCL, LSA_ILK_SPIN, LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN, DTRACE_IDNONE },
    { LS_LCK_RW_TRY_LOCK_SHARED, LSA_SPIN, LS_LCK_RW_TRY_LOCK_SHARED_SPIN, DTRACE_IDNONE },

    { NULL, NULL, 0, 0 }
};
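
/*
 * Illustrative note: lockstat_provide() below turns each row of the table
 * above into a DTrace probe named lockstat:mach_kernel:<lsp_func>:<lsp_name>,
 * assuming the LS_ function macros and the LSA_, LSR_ and LSS_ name macros
 * from <sys/lockstat.h> expand to the corresponding strings (they are not
 * shown in this file).  From user space the registered probes could then be
 * listed with, for example:
 *
 *	dtrace -l -P lockstat
 */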

dtrace_id_t lockstat_probemap[LS_NPROBES];

extern void lck_mtx_lock_lockstat_patch_point();
extern void lck_mtx_try_lock_lockstat_patch_point();
extern void lck_mtx_try_lock_spin_lockstat_patch_point();
extern void lck_mtx_unlock_lockstat_patch_point();
extern void lck_mtx_unlock2_lockstat_patch_point();
extern void mutex_lock_lockstat_patch_point();
extern void mutex_unlock_lockstat_patch_point();
extern void mutex_unlock2_lockstat_patch_point();
extern void mutex_try_lockstat_patch_point();
extern void mutex_try_spin_lockstat_patch_point();
extern void mutex_lock_spin_lockstat_patch_point();
extern void mutex_convert_spin_lockstat_patch_point();
extern void lck_rw_done_lockstat_patch_point();
extern void lck_rw_lock_shared_lockstat_patch_point();
extern void lck_mtx_lock_ext_lockstat_patch_point();
extern void lck_mtx_ext_unlock_lockstat_patch_point();

vm_offset_t *assembly_probes[] = {
#if defined(__i386__)
    /*
     * On x86 these points are better done via hot patches, which ensure
     * there is zero overhead when not in use.  On x86 these patch points
     * are swapped between the return instruction and a no-op, with the
     * Dtrace call following the return.  (A sketch of a patched routine
     * follows this table.)
     */
    (vm_offset_t *) lck_mtx_lock_lockstat_patch_point,
    (vm_offset_t *) lck_mtx_try_lock_lockstat_patch_point,
    (vm_offset_t *) lck_mtx_try_lock_spin_lockstat_patch_point,
    (vm_offset_t *) lck_mtx_unlock_lockstat_patch_point,
    (vm_offset_t *) lck_mtx_unlock2_lockstat_patch_point,
    (vm_offset_t *) lck_rw_lock_shared_lockstat_patch_point,
    (vm_offset_t *) lck_rw_done_lockstat_patch_point,
    (vm_offset_t *) lck_mtx_lock_ext_lockstat_patch_point,
    (vm_offset_t *) lck_mtx_ext_unlock_lockstat_patch_point,
    (vm_offset_t *) mutex_lock_lockstat_patch_point,
    (vm_offset_t *) mutex_try_spin_lockstat_patch_point,
    (vm_offset_t *) mutex_try_lockstat_patch_point,
    (vm_offset_t *) mutex_unlock_lockstat_patch_point,
    (vm_offset_t *) mutex_unlock2_lockstat_patch_point,
    (vm_offset_t *) mutex_lock_spin_lockstat_patch_point,
    (vm_offset_t *) mutex_convert_spin_lockstat_patch_point,
#else
    (vm_offset_t *) lck_mtx_unlock_lockstat_patch_point,
#endif
    NULL
};
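
/*
 * Illustrative sketch only (pseudo-assembly; the label is one of the patch
 * points listed above): on x86 a patch point is the single return byte that
 * sits in front of the probe call inside a lock routine.  lockstat_hot_patch()
 * below rewrites just that byte, so a disabled probe costs at most one no-op:
 *
 *	lck_mtx_lock_lockstat_patch_point:
 *		ret			; 0xc3 while probes are off: the routine
 *					; returns before any DTrace work is done
 *	...becomes, once patched...
 *		nop			; 0x90 while probes are on: execution falls
 *					; through to the probe call that follows
 *		<call into the lockstat/DTrace probe code>
 *		ret
 */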

/*
 * Hot patch switches back and forth the probe points between NOP and RET.
 * The argument indicates whether the probe point is on or off.
 */
void lockstat_hot_patch(boolean_t active)
{
    int i;

    for (i = 0; assembly_probes[i]; i++) {
#if defined(__i386__)
        uint8_t instr;
        instr = (active ? NOP : RET );
        (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i]),
            sizeof(instr));
#endif
#if defined(__ppc__)
        uint32_t instr;
        instr = (active ? NOP : RET );
        (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i]), sizeof(instr));
#endif
    }
}
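
/*
 * Usage sketch (illustrative only): the provider callbacks further down drive
 * the patching.  lockstat_enable() activates the patch points whenever a
 * lockstat probe is enabled, and lockstat_disable() deactivates them when a
 * probe is disabled.
 */
#if 0
    lockstat_hot_patch(TRUE);	/* patch points become NOP: probes can fire */
    lockstat_hot_patch(FALSE);	/* patch points become RET: zero overhead */
#endif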

void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);

/*
 * An initial value for lockstat_probe. See lockstat_attach(). Think safety.
 */
static void
lockstat_stub(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
    uint64_t arg2, uint64_t arg3, uint64_t arg4)
{
#pragma unused(id,arg0,arg1,arg2,arg3,arg4)
}
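
/*
 * Illustrative sketch only: the patched-in probe sites reach DTrace through
 * the lockstat_probe function pointer.  It is parked on the stub above by
 * lockstat_attach() and only switched to dtrace_probe by lockstat_enable(),
 * so a probe site that fires before DTrace is ready lands harmlessly in the
 * stub.
 */
#if 0
    lockstat_probe = lockstat_stub;	/* quiescent: calls do nothing */
    lockstat_probe = dtrace_probe;	/* enabled: calls feed DTrace */
#endif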

static dev_info_t *lockstat_devi;	/* saved in xxattach() for xxinfo() */
static dtrace_provider_id_t lockstat_id;

static void
lockstat_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg)
    lockstat_probe_t *probe = parg;

    ASSERT(!lockstat_probemap[probe->lsp_probe]);

    lockstat_probemap[probe->lsp_probe] = id;
    membar_producer();

    lockstat_probe = dtrace_probe;
    membar_producer();

    lockstat_hot_patch(TRUE);
    membar_producer();
}

static void
lockstat_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    lockstat_probe_t *probe = parg;
    int i;

    ASSERT(lockstat_probemap[probe->lsp_probe]);

    lockstat_probemap[probe->lsp_probe] = 0;
    lockstat_hot_patch(FALSE);
    membar_producer();

    /*
     * See if we have any probes left enabled.
     */
    for (i = 0; i < LS_NPROBES; i++) {
        if (lockstat_probemap[i]) {
            /*
             * This probe is still enabled.  We don't need to deal
             * with waiting for all threads to be out of the
             * lockstat critical sections; just return.
             */
            return;
        }
    }
}

static void
lockstat_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
    int i;

    for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
        lockstat_probe_t *probe = &lockstat_probes[i];

        if (dtrace_probe_lookup(lockstat_id, "mach_kernel",
            probe->lsp_func, probe->lsp_name) != 0)
            continue;

        ASSERT(!probe->lsp_id);
        probe->lsp_id = dtrace_probe_create(lockstat_id,
            "mach_kernel", probe->lsp_func, probe->lsp_name,
            LOCKSTAT_AFRAMES, probe);
    }
}

static void
lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    lockstat_probe_t *probe = parg;

    ASSERT(!lockstat_probemap[probe->lsp_probe]);

    probe->lsp_id = 0;
}

static dtrace_pattr_t lockstat_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};

static dtrace_pops_t lockstat_pops = {
    lockstat_provide,		/* dtps_provide */
    NULL,			/* dtps_provide_module */
    lockstat_enable,		/* dtps_enable */
    lockstat_disable,		/* dtps_disable */
    NULL,			/* dtps_suspend */
    NULL,			/* dtps_resume */
    NULL,			/* dtps_getargdesc */
    NULL,			/* dtps_getargval */
    NULL,			/* dtps_usermode */
    lockstat_destroy		/* dtps_destroy */
};

static int
lockstat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
    switch (cmd) {
    case DDI_ATTACH:
        break;
    case DDI_RESUME:
        return (DDI_SUCCESS);
    default:
        return (DDI_FAILURE);
    }

    if (ddi_create_minor_node(devi, "lockstat", S_IFCHR, 0,
        DDI_PSEUDO, 0) == DDI_FAILURE ||
        dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_KERNEL,
        NULL, &lockstat_pops, NULL, &lockstat_id) != 0) {
        ddi_remove_minor_node(devi, NULL);
        return (DDI_FAILURE);
    }

    ddi_report_dev(devi);
    lockstat_devi = devi;
    lockstat_probe = lockstat_stub;
    return (DDI_SUCCESS);
}

d_open_t _lockstat_open;

int _lockstat_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
    return 0;
}

#define LOCKSTAT_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw lockstat_cdevsw =
{
    _lockstat_open,		/* open */
    eno_opcl,			/* close */
    eno_rdwrt,			/* read */
    eno_rdwrt,			/* write */
    eno_ioctl,			/* ioctl */
    (stop_fcn_t *)nulldev,	/* stop */
    (reset_fcn_t *)nulldev,	/* reset */
    NULL,			/* tty's */
    eno_select,			/* select */
    eno_mmap,			/* mmap */
    eno_strat,			/* strategy */
    eno_getc,			/* getc */
    eno_putc,			/* putc */
    0				/* type */
};
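
/*
 * Illustrative note: once cdevsw_add() (called from lockstat_init() below)
 * has installed this table, an open(2) of the lockstat device node is routed
 * through the d_open slot to _lockstat_open() above, while the eno_ stubs
 * fail every other operation.  The device path shown here is hypothetical.
 */
#if 0
    int fd = open("/dev/lockstat", O_RDONLY);	/* ends up in _lockstat_open() */
#endif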

static int gLockstatInited = 0;

void lockstat_init( void );

void lockstat_init( void )
{
    if (0 == gLockstatInited)
    {
        int majdevno = cdevsw_add(LOCKSTAT_MAJOR, &lockstat_cdevsw);

        if (majdevno < 0) {
            printf("lockstat_init: failed to allocate a major number!\n");
            return;
        }

        lockstat_attach( (dev_info_t *)majdevno, DDI_ATTACH );

        gLockstatInited = 1;
    } else
        panic("lockstat_init: called twice!\n");
}

#undef LOCKSTAT_MAJOR