/* bsd/dev/dtrace/lockstat.c — Apple XNU (xnu-2050.24.15), DTrace lockstat provider. */
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /* #pragma ident "@(#)lockstat.c 1.12 08/01/16 SMI" */
27
28
29 #ifdef KERNEL
30 #ifndef _KERNEL
31 #define _KERNEL /* Solaris vs. Darwin */
32 #endif
33 #endif
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/errno.h>
38 #include <sys/stat.h>
39 #include <sys/ioctl.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <miscfs/devfs/devfs.h>
43
44 #include <sys/dtrace.h>
45 #include <sys/dtrace_impl.h>
46
47 #include <sys/dtrace_glue.h>
48
49 #include <sys/lockstat.h>
50
51 #include <kern/processor.h>
52
53 #define membar_producer dtrace_membar_producer
54
55 /*
56 * Hot patch values, x86
57 */
58 #if defined(__i386__) || defined(__x86_64__)
59 #define NOP 0x90
60 #define RET 0xc3
61 #define LOCKSTAT_AFRAMES 1
62 #else
63 #error "not ported to this architecture"
64 #endif
65
/*
 * One lockstat probe: the (function, event) name pair it is published
 * under, the index it occupies in lockstat_probemap[], and the dtrace
 * probe id assigned when the probe is created.
 */
typedef struct lockstat_probe {
	const char	*lsp_func;	/* probe "function" component (LS_* lock entry point) */
	const char	*lsp_name;	/* probe "name" component (acquire/block/spin/release/...) */
	int		lsp_probe;	/* index into lockstat_probemap[] */
	dtrace_id_t	lsp_id;		/* dtrace probe id; DTRACE_IDNONE until created */
} lockstat_probe_t;
72
/*
 * Master table of lockstat probes offered by this provider.  Each row is
 * published via lockstat_provide(); the table is terminated by a row of
 * NULLs.  Only probes actually implemented for the architecture are listed.
 */
lockstat_probe_t lockstat_probes[] =
{
#if defined(__i386__) || defined(__x86_64__)
	/* Only provide implemented probes for each architecture */
	{ LS_LCK_MTX_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK,	LSA_SPIN,	LS_LCK_MTX_LOCK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK,	LSA_BLOCK,	LS_LCK_MTX_LOCK_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_MTX_TRY_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_TRY_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_TRY_SPIN_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_UNLOCK,	LSA_RELEASE,	LS_LCK_MTX_UNLOCK_RELEASE, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,	LSA_SPIN,	LS_LCK_MTX_EXT_LOCK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,	LSA_BLOCK,	LS_LCK_MTX_EXT_LOCK_BLOCK, DTRACE_IDNONE },
	/* NOTE(review): ext-try-lock acquire probe left disabled upstream. */
//	{ LS_LCK_MTX_EXT_TRY_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_UNLOCK,	LSA_RELEASE,	LS_LCK_MTX_EXT_UNLOCK_RELEASE, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK_SPIN_LOCK,	LSA_ACQUIRE,	LS_LCK_MTX_LOCK_SPIN_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED,	LSR_ACQUIRE,	LS_LCK_RW_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED,	LSR_BLOCK,	LS_LCK_RW_LOCK_SHARED_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED,	LSR_SPIN,	LS_LCK_RW_LOCK_SHARED_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL,	LSR_ACQUIRE,	LS_LCK_RW_LOCK_EXCL_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL,	LSR_BLOCK,	LS_LCK_RW_LOCK_EXCL_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL,	LSR_SPIN,	LS_LCK_RW_LOCK_EXCL_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_DONE,	LSR_RELEASE,	LS_LCK_RW_DONE_RELEASE, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_SHARED,	LSR_ACQUIRE,	LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_EXCL,	LSR_ACQUIRE,	LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL,	LSR_UPGRADE,	LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL,	LSR_SPIN,	LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL,	LSR_BLOCK,	LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL_TO_SHARED,	LSR_DOWNGRADE,	LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, DTRACE_IDNONE },
#endif
#ifdef	LATER
	/* Interlock and spinlock measurements would be nice, but later */
	{ LS_LCK_SPIN_LOCK,	LSS_ACQUIRE,	LS_LCK_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_SPIN_LOCK,	LSS_SPIN,	LS_LCK_SPIN_LOCK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_SPIN_UNLOCK,	LSS_RELEASE,	LS_LCK_SPIN_UNLOCK_RELEASE, DTRACE_IDNONE },

	{ LS_LCK_RW_LOCK_EXCL_TO_SHARED,	LSA_ILK_SPIN,	LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK,	LSA_ILK_SPIN,	LS_LCK_MTX_LOCK_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK,	LSA_ILK_SPIN,	LS_LCK_MTX_EXT_LOCK_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_EXCL,	LSA_ILK_SPIN,	LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_SHARED,	LSA_SPIN,	LS_LCK_RW_TRY_LOCK_SHARED_SPIN, DTRACE_IDNONE },
#endif
	/* Terminator. */
	{ NULL, NULL, 0, 0 }
};
118
119 dtrace_id_t lockstat_probemap[LS_NPROBES];
120
#if CONFIG_DTRACE
/*
 * Assembly patch-point symbols inside the lock primitives.  Each marks
 * the single instruction that lockstat_hot_patch() toggles between a
 * RET (probe off) and a NOP (probe on).
 */
extern void lck_mtx_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_spin_lockstat_patch_point(void);
extern void lck_mtx_unlock_lockstat_patch_point(void);
extern void lck_mtx_lock_ext_lockstat_patch_point(void);
extern void lck_mtx_ext_unlock_lockstat_patch_point(void);

extern void lck_rw_done_release1_lockstat_patch_point(void);
extern void lck_rw_done_release2_lockstat_patch_point(void);
extern void lck_rw_lock_shared_lockstat_patch_point(void);
extern void lck_rw_lock_exclusive_lockstat_patch_point(void);
extern void lck_rw_lock_shared_to_exclusive_lockstat_patch_point(void);
extern void lck_rw_try_lock_shared_lockstat_patch_point(void);
extern void lck_rw_try_lock_exclusive_lockstat_patch_point(void);
extern void lck_mtx_lock_spin_lockstat_patch_point(void);
#endif /* CONFIG_DTRACE */
138
/* Associates a lockstat probe index with one hot-patchable code address. */
typedef struct lockstat_assembly_probe {
	int		lsap_probe;		/* LS_* probe index this patch point belongs to */
	vm_offset_t	*lsap_patch_point;	/* address of the patchable instruction */
} lockstat_assembly_probe_t;
143
144
/*
 * Table mapping probe indices to their patch points.  A probe may appear
 * more than once if it has multiple patch points; the table ends with an
 * LS_LCK_INVALID/NULL sentinel.
 */
lockstat_assembly_probe_t assembly_probes[] =
{
#if CONFIG_DTRACE
#if defined(__i386__) || defined(__x86_64__)
	/*
	 * On x86 these points are better done via hot patches, which ensure
	 * there is zero overhead when not in use.  On x86 these patch points
	 * are swapped between the return instruction and a no-op, with the
	 * Dtrace call following the return.
	 */
	{ LS_LCK_MTX_LOCK_ACQUIRE,	(vm_offset_t *) lck_mtx_lock_lockstat_patch_point },
	{ LS_LCK_MTX_TRY_LOCK_ACQUIRE,	(vm_offset_t *) lck_mtx_try_lock_lockstat_patch_point },
	{ LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE,	(vm_offset_t *) lck_mtx_try_lock_spin_lockstat_patch_point },
	{ LS_LCK_MTX_UNLOCK_RELEASE,	(vm_offset_t *) lck_mtx_unlock_lockstat_patch_point },
	{ LS_LCK_MTX_EXT_LOCK_ACQUIRE,	(vm_offset_t *) lck_mtx_lock_ext_lockstat_patch_point },
	{ LS_LCK_MTX_EXT_UNLOCK_RELEASE,	(vm_offset_t *) lck_mtx_ext_unlock_lockstat_patch_point },
	{ LS_LCK_RW_LOCK_SHARED_ACQUIRE,	(vm_offset_t *) lck_rw_lock_shared_lockstat_patch_point },
	{ LS_LCK_RW_LOCK_EXCL_ACQUIRE,	(vm_offset_t *) lck_rw_lock_exclusive_lockstat_patch_point },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,(vm_offset_t *) lck_rw_lock_shared_to_exclusive_lockstat_patch_point },
	{ LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,	(vm_offset_t *) lck_rw_try_lock_shared_lockstat_patch_point },
	{ LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,	(vm_offset_t *) lck_rw_try_lock_exclusive_lockstat_patch_point },
	{ LS_LCK_MTX_LOCK_SPIN_ACQUIRE,	(vm_offset_t *) lck_mtx_lock_spin_lockstat_patch_point },
#endif
#endif /* CONFIG_DTRACE */
	/* Terminator. */
	{ LS_LCK_INVALID, NULL }
};
/*
 * Hot patch switches back and forth the probe points between NOP and RET.
 * The active argument indicates whether the probe point will turn on or off.
 *	on  == plant a NOP and thus fall through to the probe call
 *	off == plant a RET and thus avoid the probe call completely
 * The ls_probe argument identifies which probe we will patch.
 */
#if defined(__APPLE__)
/*
 * NOTE(review): the original attached an unbraced `if' to a
 * conditionally-compiled compound statement, so with no architecture
 * block selected the loop body would not even parse; the braces are now
 * unconditional.  The old `#pragma unused(active)' was dropped because
 * `active' is always used -- unsupported architectures hit the #error
 * above before this code is compiled.
 */
static
void lockstat_hot_patch(boolean_t active, int ls_probe)
{
	int i;

	/*
	 * Loop through entire table, in case there are
	 * multiple patch points per probe.
	 */
	for (i = 0; assembly_probes[i].lsap_patch_point; i++) {
		if (ls_probe != assembly_probes[i].lsap_probe)
			continue;
#if defined(__i386__) || defined(__x86_64__)
		{
			/* One-byte overwrite: NOP falls through to the probe
			 * call, RET returns before it is ever reached. */
			uint8_t instr = (active ? NOP : RET);
			(void) ml_nofault_copy((vm_offset_t)&instr,
			    *(assembly_probes[i].lsap_patch_point),
			    sizeof(instr));
		}
#endif
	}
}
#endif /* __APPLE__ */
202
203
/* Probe-firing entry point; set to dtrace_probe in lockstat_attach(). */
void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
206
#if defined(__APPLE__)
/*
 * Wrapper used by assembler hot-patched probes: fires the dtrace probe
 * currently mapped to `probe', if one is enabled, passing the lock
 * address and read/write flag as the first two probe arguments.
 */
void
lockstat_probe_wrapper(int probe, uintptr_t lp, int rwflag)
{
	dtrace_id_t probe_id = lockstat_probemap[probe];

	if (probe_id == 0)
		return;

	(*lockstat_probe)(probe_id, (uintptr_t)lp, (uint64_t)rwflag, 0, 0, 0);
}
#endif /* __APPLE__ */
220
221
static dev_info_t *lockstat_devi;	/* saved in xxattach() for xxinfo() */
static dtrace_provider_id_t lockstat_id;	/* provider id from dtrace_register() */
224
225 /*ARGSUSED*/
226 static int
227 lockstat_enable(void *arg, dtrace_id_t id, void *parg)
228 {
229 #pragma unused(arg) /* __APPLE__ */
230
231 lockstat_probe_t *probe = parg;
232
233 ASSERT(!lockstat_probemap[probe->lsp_probe]);
234
235 lockstat_probemap[probe->lsp_probe] = id;
236 membar_producer();
237
238 lockstat_hot_patch(TRUE, probe->lsp_probe);
239 membar_producer();
240 return(0);
241
242 }
243
/*ARGSUSED*/
/*
 * dtrace disable callback: clear the map entry (so in-flight hits find
 * id 0 and fire nothing), then hot-patch the probe's code path off.
 */
static void
lockstat_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id) /* __APPLE__ */

	lockstat_probe_t *probe = parg;
	int i;

	ASSERT(lockstat_probemap[probe->lsp_probe]);

	lockstat_probemap[probe->lsp_probe] = 0;
	lockstat_hot_patch(FALSE, probe->lsp_probe);
	membar_producer();

	/*
	 * See if we have any probes left enabled.
	 */
	for (i = 0; i < LS_NPROBES; i++) {
		if (lockstat_probemap[i]) {
			/*
			 * This probe is still enabled. We don't need to deal
			 * with waiting for all threads to be out of the
			 * lockstat critical sections; just return.
			 */
			return;
		}
	}

	/*
	 * NOTE(review): nothing happens when no probes remain enabled, so
	 * the scan above is currently a no-op either way; presumably a
	 * quiesce step existed upstream -- confirm before relying on it.
	 */
}
274
275 /*ARGSUSED*/
276 static void
277 lockstat_provide(void *arg, const dtrace_probedesc_t *desc)
278 {
279 #pragma unused(arg, desc) /* __APPLE__ */
280
281 int i = 0;
282
283 for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
284 lockstat_probe_t *probe = &lockstat_probes[i];
285
286 if (dtrace_probe_lookup(lockstat_id, "mach_kernel",
287 probe->lsp_func, probe->lsp_name) != 0)
288 continue;
289
290 ASSERT(!probe->lsp_id);
291 probe->lsp_id = dtrace_probe_create(lockstat_id,
292 "mach_kernel", probe->lsp_func, probe->lsp_name,
293 LOCKSTAT_AFRAMES, probe);
294 }
295 }
296
297
298 /*ARGSUSED*/
299 static void
300 lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
301 {
302 #pragma unused(arg, id) /* __APPLE__ */
303
304 lockstat_probe_t *probe = parg;
305
306 ASSERT(!lockstat_probemap[probe->lsp_probe]);
307 probe->lsp_id = 0;
308 }
309
/*
 * Stability attributes advertised for the lockstat provider.
 * NOTE(review): rows follow the dtrace_pattr_t member order declared in
 * the dtrace headers -- confirm against <sys/dtrace.h> before reordering.
 */
static dtrace_pattr_t lockstat_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
317
/*
 * Provider operations handed to dtrace_register().  Slots follow the
 * dtrace_pops_t member order; NULL entries are callbacks this provider
 * does not implement.
 */
static dtrace_pops_t lockstat_pops = {
	lockstat_provide,	/* create probes */
	NULL,
	lockstat_enable,	/* arm a probe */
	lockstat_disable,	/* disarm a probe */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	lockstat_destroy	/* tear down a probe */
};
330
/*
 * Attach the lockstat pseudo-device: create the /dev minor node and
 * register the "lockstat" provider with the dtrace framework.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
lockstat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* Nothing to re-establish on resume. */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "lockstat", S_IFCHR, 0,
	    DDI_PSEUDO, 0) == DDI_FAILURE ||
	    dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_KERNEL,
	    NULL, &lockstat_pops, NULL, &lockstat_id) != 0) {
		/* Undo the minor node; harmless if it was never created. */
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}

	/* Point the hot-patched lock paths at the real dtrace entry;
	 * barrier orders this before the device becomes visible. */
	lockstat_probe = dtrace_probe;
	membar_producer();

	ddi_report_dev(devi);
	lockstat_devi = devi;
	return (DDI_SUCCESS);
}
358
359 d_open_t _lockstat_open;
360
361 int _lockstat_open(dev_t dev, int flags, int devtype, struct proc *p)
362 {
363 #pragma unused(dev,flags,devtype,p)
364 return 0;
365 }
366
#define LOCKSTAT_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.  Only open is implemented; every other entry is the stock
 * error/no-op stub for an operation this device does not support.
 */
static struct cdevsw lockstat_cdevsw =
{
	_lockstat_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	eno_ioctl,		/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
390
391 static int gLockstatInited = 0;
392
393 void lockstat_init( void );
394
395 void lockstat_init( void )
396 {
397 if (0 == gLockstatInited)
398 {
399 int majdevno = cdevsw_add(LOCKSTAT_MAJOR, &lockstat_cdevsw);
400
401 if (majdevno < 0) {
402 printf("lockstat_init: failed to allocate a major number!\n");
403 gLockstatInited = 0;
404 return;
405 }
406
407 lockstat_attach( (dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH );
408 gLockstatInited = 1;
409 } else
410 panic("lockstat_init: called twice!\n");
411 }
412 #undef LOCKSTAT_MAJOR