4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* #pragma ident "@(#)lockstat.c 1.12 08/01/16 SMI" */
31 #define _KERNEL /* Solaris vs. Darwin */
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/errno.h>
39 #include <sys/ioctl.h>
41 #include <sys/fcntl.h>
42 #include <miscfs/devfs/devfs.h>
44 #include <sys/dtrace.h>
45 #include <sys/dtrace_impl.h>
47 #include <sys/dtrace_glue.h>
49 #include <sys/lockstat.h>
51 #include <kern/processor.h>
53 #define membar_producer dtrace_membar_producer
56 * Hot patch values, x86
58 #if defined(__i386__) || defined(__x86_64__)
61 #define LOCKSTAT_AFRAMES 1
63 #error "not ported to this architecture"
/*
 * One row of the lockstat probe table.  NOTE(review): the struct's field
 * list (original lines 67-72) was lost in extraction; the rest of this file
 * references members lsp_func, lsp_name, lsp_probe and lsp_id -- restore
 * those declarations before this will compile.
 */
66 typedef struct lockstat_probe
{
/*
 * Static table of every lockstat probe this provider can publish:
 * { function name, event name, LS_* probe index, dtrace id (filled at
 * registration; DTRACE_IDNONE until then) }.  lockstat_provide() walks this
 * table until it hits a NULL lsp_func.
 * NOTE(review): the initializer's opening '{' (original line 74) and its
 * closing '};' plus matching '#endif' (original lines 114-118) were lost in
 * extraction -- the embedded source numbering jumps 73->75 and 113->119.
 */
73 lockstat_probe_t lockstat_probes
[] =
75 #if defined(__i386__) || defined(__x86_64__)
76 /* Only provide implemented probes for each architecture */
/* --- mutex (lck_mtx) probes --- */
77 { LS_LCK_MTX_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_LOCK_ACQUIRE
, DTRACE_IDNONE
},
78 { LS_LCK_MTX_LOCK
, LSA_SPIN
, LS_LCK_MTX_LOCK_SPIN
, DTRACE_IDNONE
},
79 { LS_LCK_MTX_LOCK
, LSA_BLOCK
, LS_LCK_MTX_LOCK_BLOCK
, DTRACE_IDNONE
},
80 { LS_LCK_MTX_TRY_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_TRY_LOCK_ACQUIRE
, DTRACE_IDNONE
},
81 { LS_LCK_MTX_TRY_SPIN_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE
, DTRACE_IDNONE
},
82 { LS_LCK_MTX_UNLOCK
, LSA_RELEASE
, LS_LCK_MTX_UNLOCK_RELEASE
, DTRACE_IDNONE
},
83 { LS_LCK_MTX_EXT_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_EXT_LOCK_ACQUIRE
, DTRACE_IDNONE
},
84 { LS_LCK_MTX_EXT_LOCK
, LSA_SPIN
, LS_LCK_MTX_EXT_LOCK_SPIN
, DTRACE_IDNONE
},
85 { LS_LCK_MTX_EXT_LOCK
, LSA_BLOCK
, LS_LCK_MTX_EXT_LOCK_BLOCK
, DTRACE_IDNONE
},
86 // { LS_LCK_MTX_EXT_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
87 { LS_LCK_MTX_EXT_UNLOCK
, LSA_RELEASE
, LS_LCK_MTX_EXT_UNLOCK_RELEASE
, DTRACE_IDNONE
},
88 { LS_LCK_MTX_LOCK_SPIN_LOCK
, LSA_ACQUIRE
, LS_LCK_MTX_LOCK_SPIN_ACQUIRE
, DTRACE_IDNONE
},
/* --- reader/writer (lck_rw) probes --- */
89 { LS_LCK_RW_LOCK_SHARED
, LSR_ACQUIRE
, LS_LCK_RW_LOCK_SHARED_ACQUIRE
, DTRACE_IDNONE
},
90 { LS_LCK_RW_LOCK_SHARED
, LSR_BLOCK
, LS_LCK_RW_LOCK_SHARED_BLOCK
, DTRACE_IDNONE
},
91 { LS_LCK_RW_LOCK_SHARED
, LSR_SPIN
, LS_LCK_RW_LOCK_SHARED_SPIN
, DTRACE_IDNONE
},
92 { LS_LCK_RW_LOCK_EXCL
, LSR_ACQUIRE
, LS_LCK_RW_LOCK_EXCL_ACQUIRE
, DTRACE_IDNONE
},
93 { LS_LCK_RW_LOCK_EXCL
, LSR_BLOCK
, LS_LCK_RW_LOCK_EXCL_BLOCK
, DTRACE_IDNONE
},
94 { LS_LCK_RW_LOCK_EXCL
, LSR_SPIN
, LS_LCK_RW_LOCK_EXCL_SPIN
, DTRACE_IDNONE
},
95 { LS_LCK_RW_DONE
, LSR_RELEASE
, LS_LCK_RW_DONE_RELEASE
, DTRACE_IDNONE
},
96 { LS_LCK_RW_TRY_LOCK_SHARED
, LSR_ACQUIRE
, LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE
, DTRACE_IDNONE
},
97 { LS_LCK_RW_TRY_LOCK_EXCL
, LSR_ACQUIRE
, LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE
, DTRACE_IDNONE
},
98 { LS_LCK_RW_LOCK_SHARED_TO_EXCL
, LSR_UPGRADE
, LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE
, DTRACE_IDNONE
},
99 { LS_LCK_RW_LOCK_SHARED_TO_EXCL
, LSR_SPIN
, LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN
, DTRACE_IDNONE
},
100 { LS_LCK_RW_LOCK_SHARED_TO_EXCL
, LSR_BLOCK
, LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK
, DTRACE_IDNONE
},
101 { LS_LCK_RW_LOCK_EXCL_TO_SHARED
, LSR_DOWNGRADE
, LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE
, DTRACE_IDNONE
},
104 /* Interlock and spinlock measurements would be nice, but later */
/* --- spinlock (lck_spin) and interlock-spin probes --- */
105 { LS_LCK_SPIN_LOCK
, LSS_ACQUIRE
, LS_LCK_SPIN_LOCK_ACQUIRE
, DTRACE_IDNONE
},
106 { LS_LCK_SPIN_LOCK
, LSS_SPIN
, LS_LCK_SPIN_LOCK_SPIN
, DTRACE_IDNONE
},
107 { LS_LCK_SPIN_UNLOCK
, LSS_RELEASE
, LS_LCK_SPIN_UNLOCK_RELEASE
, DTRACE_IDNONE
},
109 { LS_LCK_RW_LOCK_EXCL_TO_SHARED
, LSA_ILK_SPIN
, LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN
, DTRACE_IDNONE
},
110 { LS_LCK_MTX_LOCK
, LSA_ILK_SPIN
, LS_LCK_MTX_LOCK_ILK_SPIN
, DTRACE_IDNONE
},
111 { LS_LCK_MTX_EXT_LOCK
, LSA_ILK_SPIN
, LS_LCK_MTX_EXT_LOCK_ILK_SPIN
, DTRACE_IDNONE
},
112 { LS_LCK_RW_TRY_LOCK_EXCL
, LSA_ILK_SPIN
, LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN
, DTRACE_IDNONE
},
113 { LS_LCK_RW_TRY_LOCK_SHARED
, LSA_SPIN
, LS_LCK_RW_TRY_LOCK_SHARED_SPIN
, DTRACE_IDNONE
},
/*
 * NOTE(review): the terminating sentinel entry, closing '};' and '#endif'
 * (original lines 114-118) are missing here.
 */
/*
 * LS_* probe index -> enabled DTrace probe id.  A slot of 0 means the probe
 * is not enabled (see lockstat_enable()/lockstat_disable() below, which set
 * and clear these entries).
 */
119 dtrace_id_t lockstat_probemap
[LS_NPROBES
];
/*
 * Assembly patch points inside the lock primitives.  Each symbol marks the
 * instruction that lockstat_hot_patch() rewrites (NOP <-> RET) to turn the
 * corresponding probe on or off.
 */
122 extern void lck_mtx_lock_lockstat_patch_point(void);
123 extern void lck_mtx_try_lock_lockstat_patch_point(void);
124 extern void lck_mtx_try_lock_spin_lockstat_patch_point(void);
125 extern void lck_mtx_unlock_lockstat_patch_point(void);
126 extern void lck_mtx_lock_ext_lockstat_patch_point(void);
127 extern void lck_mtx_ext_unlock_lockstat_patch_point(void);
129 extern void lck_rw_done_release1_lockstat_patch_point(void);
130 extern void lck_rw_done_release2_lockstat_patch_point(void);
131 extern void lck_rw_lock_shared_lockstat_patch_point(void);
132 extern void lck_rw_lock_exclusive_lockstat_patch_point(void);
133 extern void lck_rw_lock_shared_to_exclusive_lockstat_patch_point(void);
134 extern void lck_rw_try_lock_shared_lockstat_patch_point(void);
135 extern void lck_rw_try_lock_exclusive_lockstat_patch_point(void);
136 extern void lck_mtx_lock_spin_lockstat_patch_point(void);
/* NOTE(review): matching '#if CONFIG_DTRACE' is not visible in this chunk. */
137 #endif /* CONFIG_DTRACE */
/*
 * Binds an LS_* probe index to the address of its in-kernel patch point.
 * NOTE(review): the first member (original line 140, presumably
 * 'int lsap_probe;' -- it is dereferenced as .lsap_probe in
 * lockstat_hot_patch() below) was lost in extraction.
 */
139 typedef struct lockstat_assembly_probe
{
141 vm_offset_t
* lsap_patch_point
;
142 } lockstat_assembly_probe_t
;
/*
 * Patch-point table consumed by lockstat_hot_patch(): one entry per probe
 * that is implemented as a hot-patched instruction, terminated by the
 * { LS_LCK_INVALID, NULL } sentinel.
 * NOTE(review): the initializer's opening '{' (original lines 146-147) and
 * the trailing '};' (original line 170) were lost in extraction.
 */
145 lockstat_assembly_probe_t assembly_probes
[] =
148 #if defined(__i386__) || defined(__x86_64__)
150 * On x86 these points are better done via hot patches, which ensure
151 * there is zero overhead when not in use. On x86 these patch points
152 * are swapped between the return instruction and a no-op, with the
153 * Dtrace call following the return.
155 { LS_LCK_MTX_LOCK_ACQUIRE
, (vm_offset_t
*) lck_mtx_lock_lockstat_patch_point
},
156 { LS_LCK_MTX_TRY_LOCK_ACQUIRE
, (vm_offset_t
*) lck_mtx_try_lock_lockstat_patch_point
},
157 { LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE
, (vm_offset_t
*) lck_mtx_try_lock_spin_lockstat_patch_point
},
158 { LS_LCK_MTX_UNLOCK_RELEASE
, (vm_offset_t
*) lck_mtx_unlock_lockstat_patch_point
},
159 { LS_LCK_MTX_EXT_LOCK_ACQUIRE
, (vm_offset_t
*) lck_mtx_lock_ext_lockstat_patch_point
},
160 { LS_LCK_MTX_EXT_UNLOCK_RELEASE
, (vm_offset_t
*) lck_mtx_ext_unlock_lockstat_patch_point
},
161 { LS_LCK_RW_LOCK_SHARED_ACQUIRE
, (vm_offset_t
*) lck_rw_lock_shared_lockstat_patch_point
},
162 { LS_LCK_RW_LOCK_EXCL_ACQUIRE
, (vm_offset_t
*) lck_rw_lock_exclusive_lockstat_patch_point
},
163 { LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE
,(vm_offset_t
*) lck_rw_lock_shared_to_exclusive_lockstat_patch_point
},
164 { LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE
, (vm_offset_t
*) lck_rw_try_lock_shared_lockstat_patch_point
},
165 { LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE
, (vm_offset_t
*) lck_rw_try_lock_exclusive_lockstat_patch_point
},
166 { LS_LCK_MTX_LOCK_SPIN_ACQUIRE
, (vm_offset_t
*) lck_mtx_lock_spin_lockstat_patch_point
},
168 #endif /* CONFIG_DTRACE */
/* Sentinel: lockstat_hot_patch() stops at a NULL lsap_patch_point. */
169 { LS_LCK_INVALID
, NULL
}
172 * Hot patch switches back and forth the probe points between NOP and RET.
173 * The active argument indicates whether the probe point will turn on or off.
174 * on == plant a NOP and thus fall through to the probe call
175 * off == plant a RET and thus avoid the probe call completely
176 * The lsap_probe identifies which probe we will patch.
178 #if defined(__APPLE__)
/*
 * lockstat_hot_patch(active, ls_probe):
 *   active   -- TRUE to arm the probe (NOP), FALSE to disarm it (RET)
 *   ls_probe -- LS_* index selecting which patch point(s) to rewrite
 * Called from lockstat_enable()/lockstat_disable() below.
 * NOTE(review): the function's opening brace and the local declarations
 * of 'i' and 'instr' (original lines 181-185) were lost in extraction.
 */
180 void lockstat_hot_patch(boolean_t active
, int ls_probe
)
182 #pragma unused(active)
186 * Loop through entire table, in case there are
187 * multiple patch points per probe.
189 for (i
= 0; assembly_probes
[i
].lsap_patch_point
; i
++) {
190 if (ls_probe
== assembly_probes
[i
].lsap_probe
)
191 #if defined(__i386__) || defined(__x86_64__)
/* Choose the byte to plant: NOP arms the probe, RET short-circuits it. */
194 instr
= (active
? NOP
: RET
);
/* Write the byte through ml_nofault_copy (safe kernel-text store). */
195 (void) ml_nofault_copy( (vm_offset_t
)&instr
, *(assembly_probes
[i
].lsap_patch_point
),
/*
 * NOTE(review): the remaining arguments of ml_nofault_copy and the
 * closing braces of the loop/function (original lines 196-200) are
 * missing from this chunk.
 */
201 #endif /* __APPLE__*/
/*
 * Indirect probe-firing entry point.  Set to dtrace_probe() by
 * lockstat_attach() once the provider registers; the hot-patched lock code
 * fires probes through this pointer.
 */
204 void (*lockstat_probe
)(dtrace_id_t
, uint64_t, uint64_t,
205 uint64_t, uint64_t, uint64_t);
207 #if defined(__APPLE__)
208 /* This wrapper is used by arm assembler hot patched probes */
/*
 * lockstat_probe_wrapper(probe, lp, rwflag): look up the DTrace id for the
 * LS_* index 'probe' and fire it with the lock address and rw flag as
 * arguments (remaining probe args are zero).
 * NOTE(review): the return type, braces, local 'id' declaration and the
 * guard around the probe call (original lines 209, 211-212, 214-215,
 * 217-218) were lost in extraction.
 */
210 lockstat_probe_wrapper(int probe
, uintptr_t lp
, int rwflag
)
213 id
= lockstat_probemap
[probe
];
216 (*lockstat_probe
)(id
, (uintptr_t)lp
, (uint64_t)rwflag
, 0,0,0);
219 #endif /* __APPLE__ */
/* Provider bookkeeping: device handle and DTrace provider id, both set in
 * lockstat_attach(). */
222 static dev_info_t
*lockstat_devi
; /* saved in xxattach() for xxinfo() */
223 static dtrace_provider_id_t lockstat_id
;
/*
 * DTrace pops enable callback: record the probe id in lockstat_probemap and
 * arm the corresponding patch point.  'parg' is the lockstat_probe_t passed
 * to dtrace_probe_create() in lockstat_provide().
 * NOTE(review): the return type, braces and the membar_producer() calls
 * that ordered the map update against the hot patch (original lines
 * 226/228, 237, 239-244) were lost in extraction.
 */
227 lockstat_enable(void *arg
, dtrace_id_t id
, void *parg
)
229 #pragma unused(arg) /* __APPLE__ */
231 lockstat_probe_t
*probe
= parg
;
/* Probe must not already be enabled. */
233 ASSERT(!lockstat_probemap
[probe
->lsp_probe
]);
235 lockstat_probemap
[probe
->lsp_probe
] = id
;
/* Arm the patch point: plant NOP so execution reaches the probe call. */
238 lockstat_hot_patch(TRUE
, probe
->lsp_probe
);
/*
 * DTrace pops disable callback: clear the probemap slot, disarm the patch
 * point, then scan for any probes still enabled (if one is found, no
 * further teardown is needed).
 * NOTE(review): the return type, braces, the loop body's 'return', and the
 * function tail (original lines 245/247, 257-259, 261, 264, 268-274) were
 * lost in extraction, as was the declaration of 'i'.
 */
246 lockstat_disable(void *arg
, dtrace_id_t id
, void *parg
)
248 #pragma unused(arg, id) /* __APPLE__ */
250 lockstat_probe_t
*probe
= parg
;
/* Probe must currently be enabled. */
253 ASSERT(lockstat_probemap
[probe
->lsp_probe
]);
/* 0 marks the slot disabled; then plant RET to bypass the probe call. */
255 lockstat_probemap
[probe
->lsp_probe
] = 0;
256 lockstat_hot_patch(FALSE
, probe
->lsp_probe
);
260 * See if we have any probes left enabled.
262 for (i
= 0; i
< LS_NPROBES
; i
++) {
263 if (lockstat_probemap
[i
]) {
265 * This probe is still enabled. We don't need to deal
266 * with waiting for all threads to be out of the
267 * lockstat critical sections; just return.
/*
 * DTrace pops provide callback: walk lockstat_probes[] and create each probe
 * under provider 'lockstat', module "mach_kernel", skipping entries that
 * dtrace_probe_lookup() says already exist.
 * NOTE(review): the return type, braces and the 'continue' after the lookup
 * (original lines 276/278, 280-282, 285, 288-289, 294-296) were lost in
 * extraction, as was the declaration of 'i'.
 */
277 lockstat_provide(void *arg
, const dtrace_probedesc_t
*desc
)
279 #pragma unused(arg, desc) /* __APPLE__ */
/* Table is terminated by an entry whose lsp_func is NULL. */
283 for (i
= 0; lockstat_probes
[i
].lsp_func
!= NULL
; i
++) {
284 lockstat_probe_t
*probe
= &lockstat_probes
[i
];
/* Skip probes that were already created on an earlier provide pass. */
286 if (dtrace_probe_lookup(lockstat_id
, "mach_kernel",
287 probe
->lsp_func
, probe
->lsp_name
) != 0)
290 ASSERT(!probe
->lsp_id
);
/* Create the probe; 'probe' is handed back to enable/disable as parg. */
291 probe
->lsp_id
= dtrace_probe_create(lockstat_id
,
292 "mach_kernel", probe
->lsp_func
, probe
->lsp_name
,
293 LOCKSTAT_AFRAMES
, probe
);
/*
 * DTrace pops destroy callback: assert the probe is disabled and reset its
 * saved id.
 * NOTE(review): the return type, braces and the 'probe->lsp_id = 0;'
 * statement (original lines 299/301, 303, 307-308) were lost in extraction.
 */
300 lockstat_destroy(void *arg
, dtrace_id_t id
, void *parg
)
302 #pragma unused(arg, id) /* __APPLE__ */
304 lockstat_probe_t
*probe
= parg
;
/* A probe must be disabled before it can be destroyed. */
306 ASSERT(!lockstat_probemap
[probe
->lsp_probe
]);
/*
 * Stability attributes passed to dtrace_register(): the five rows cover
 * provider / module / function / name / args stability in dtrace_pattr_t
 * order.  NOTE(review): the closing '};' (original line 316) was lost in
 * extraction.
 */
310 static dtrace_pattr_t lockstat_attr
= {
311 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_COMMON
},
312 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
313 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
314 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_COMMON
},
315 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_COMMON
},
/*
 * Provider operations vector handed to dtrace_register().
 * NOTE(review): the entire initializer body and closing '};' (original
 * lines 319-330) were lost in extraction; it should wire up
 * lockstat_provide / lockstat_enable / lockstat_disable / lockstat_destroy
 * defined above.
 */
318 static dtrace_pops_t lockstat_pops
= {
/*
 * Attach entry point (Solaris DDI shape, called from lockstat_init() on
 * Darwin): create the /dev minor node, register the provider, and route
 * future probe firings to dtrace_probe().  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 * NOTE(review): the return type, braces and the 'switch (cmd)' dispatch
 * that selects between these returns (original lines 333-337, 339,
 * 341-342, 349-353) were lost in extraction.
 */
332 lockstat_attach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
338 return (DDI_SUCCESS
);
340 return (DDI_FAILURE
);
/* Minor-node creation and provider registration must both succeed. */
343 if (ddi_create_minor_node(devi
, "lockstat", S_IFCHR
, 0,
344 DDI_PSEUDO
, 0) == DDI_FAILURE
||
345 dtrace_register("lockstat", &lockstat_attr
, DTRACE_PRIV_KERNEL
,
346 NULL
, &lockstat_pops
, NULL
, &lockstat_id
) != 0) {
/* Roll back the minor node on any failure. */
347 ddi_remove_minor_node(devi
, NULL
);
348 return (DDI_FAILURE
);
/* From here on, hot-patched lock code fires real DTrace probes. */
351 lockstat_probe
= dtrace_probe
;
354 ddi_report_dev(devi
);
355 lockstat_devi
= devi
;
356 return (DDI_SUCCESS
);
/* Character-device open handler; all arguments are unused. */
359 d_open_t _lockstat_open
;
361 int _lockstat_open(dev_t dev
, int flags
, int devtype
, struct proc
*p
)
363 #pragma unused(dev,flags,devtype,p)
/* NOTE(review): the body and return statement (original lines 362,
 * 364-365) were lost in extraction. */
367 #define LOCKSTAT_MAJOR -24 /* let the kernel pick the device number */
370 * A struct describing which functions will get invoked for certain
/*
 * cdevsw for /dev/lockstat: only open is implemented; all other entry
 * points are the eno_* / nulldev stubs.
 * NOTE(review): the initializer's opening '{', several slots (original
 * lines 374, 382, 384, 386-389) and the closing '};' were lost in
 * extraction.
 */
373 static struct cdevsw lockstat_cdevsw
=
375 _lockstat_open
, /* open */
376 eno_opcl
, /* close */
377 eno_rdwrt
, /* read */
378 eno_rdwrt
, /* write */
379 eno_ioctl
, /* ioctl */
380 (stop_fcn_t
*)nulldev
, /* stop */
381 (reset_fcn_t
*)nulldev
, /* reset */
383 eno_select
, /* select */
385 eno_strat
, /* strategy */
/* One-shot guard so lockstat_init() registers the device only once. */
391 static int gLockstatInited
= 0;
393 void lockstat_init( void );
/*
 * Kernel startup hook: allocate a character-device major number, then run
 * lockstat_attach() to register the provider.  Panics if called twice.
 * NOTE(review): braces, the gLockstatInited update, the majdevno error
 * check and the 'else' before the panic (original lines 396, 398,
 * 400-401, 403-406, 408-409, 411) were lost in extraction.
 */
395 void lockstat_init( void )
397 if (0 == gLockstatInited
)
/* LOCKSTAT_MAJOR is -24: ask the kernel to pick a free major number. */
399 int majdevno
= cdevsw_add(LOCKSTAT_MAJOR
, &lockstat_cdevsw
);
402 printf("lockstat_init: failed to allocate a major number!\n");
/* majdevno doubles as the dev_info_t handle on Darwin. */
407 lockstat_attach( (dev_info_t
*)(uintptr_t)majdevno
, DDI_ATTACH
);
410 panic("lockstat_init: called twice!\n");
412 #undef LOCKSTAT_MAJOR