/*
 * Source: apple/xnu (xnu-3789.1.32), bsd/dev/dtrace/lockstat.c
 * (retrieved from the git.saurik.com mirror of apple/xnu.git)
 */
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /* #pragma ident "@(#)lockstat.c 1.12 08/01/16 SMI" */
27
28
29 #ifdef KERNEL
30 #ifndef _KERNEL
31 #define _KERNEL /* Solaris vs. Darwin */
32 #endif
33 #endif
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/errno.h>
38 #include <sys/stat.h>
39 #include <sys/ioctl.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <miscfs/devfs/devfs.h>
43
44 #include <sys/dtrace.h>
45 #include <sys/dtrace_impl.h>
46
47 #include <sys/dtrace_glue.h>
48
49 #include <sys/lockstat.h>
50
51 #include <kern/processor.h>
52
53 #define membar_producer dtrace_membar_producer
54
55 /*
56 * Hot patch values, x86
57 */
58 #if defined(__x86_64__)
59 #define NOP 0x90
60 #define RET 0xc3
61 #define LOCKSTAT_AFRAMES 1
62 #else
63 #error "not ported to this architecture"
64 #endif
65
/*
 * Describes one lockstat probe: the probed function name, the probe
 * (event) name, the LS_* index into lockstat_probemap, and the DTrace
 * probe id assigned when the probe is created.
 */
typedef struct lockstat_probe {
	const char	*lsp_func;	/* name of the instrumented lock function */
	const char	*lsp_name;	/* probe/event name (e.g. acquire, spin, block) */
	int		lsp_probe;	/* LS_* index into lockstat_probemap[] */
	dtrace_id_t	lsp_id;		/* DTRACE_IDNONE until dtrace_probe_create() */
} lockstat_probe_t;
72
/*
 * Static table of every probe this provider can publish, terminated by
 * a NULL entry.  lockstat_provide() walks this table and creates a
 * DTrace probe for each row, storing the assigned id in lsp_id.
 */
lockstat_probe_t lockstat_probes[] =
{
#if defined(__x86_64__)
	/* Only provide implemented probes for each architecture */
	{ LS_LCK_MTX_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK, LSA_SPIN, LS_LCK_MTX_LOCK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK, LSA_BLOCK, LS_LCK_MTX_LOCK_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_MTX_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_TRY_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_UNLOCK, LSA_RELEASE, LS_LCK_MTX_UNLOCK_RELEASE, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK, LSA_ACQUIRE, LS_LCK_MTX_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK, LSA_SPIN, LS_LCK_MTX_EXT_LOCK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK, LSA_BLOCK, LS_LCK_MTX_EXT_LOCK_BLOCK, DTRACE_IDNONE },
//	{ LS_LCK_MTX_EXT_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_UNLOCK, LSA_RELEASE, LS_LCK_MTX_EXT_UNLOCK_RELEASE, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_SPIN_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_LOCK_EXCL_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_EXCL_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_EXCL_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_DONE, LSR_RELEASE, LS_LCK_RW_DONE_RELEASE, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_UPGRADE, LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, DTRACE_IDNONE },
	{ LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSR_DOWNGRADE, LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, DTRACE_IDNONE },
#endif
#ifdef LATER
	/* Interlock and spinlock measurements would be nice, but later */
	{ LS_LCK_SPIN_LOCK, LSS_ACQUIRE, LS_LCK_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE },
	{ LS_LCK_SPIN_LOCK, LSS_SPIN, LS_LCK_SPIN_LOCK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_SPIN_UNLOCK, LSS_RELEASE, LS_LCK_SPIN_UNLOCK_RELEASE, DTRACE_IDNONE },

	{ LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSA_ILK_SPIN, LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_LOCK_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_MTX_EXT_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_EXT_LOCK_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_EXCL, LSA_ILK_SPIN, LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN, DTRACE_IDNONE },
	{ LS_LCK_RW_TRY_LOCK_SHARED, LSA_SPIN, LS_LCK_RW_TRY_LOCK_SHARED_SPIN, DTRACE_IDNONE },
#endif

	/* Terminator */
	{ NULL, NULL, 0, 0 }
};
118
/*
 * Map from LS_* probe index to the enabled DTrace probe id; 0 means the
 * probe is disabled.  Written by lockstat_enable()/lockstat_disable()
 * and read on the probe-firing fast path (lockstat_probe_wrapper()).
 */
dtrace_id_t lockstat_probemap[LS_NPROBES];

#if CONFIG_DTRACE
#if defined(__x86_64__)
/*
 * Assembly hot-patch points inside the lock primitives; each symbol is
 * the address of the single byte that lockstat_hot_patch() rewrites
 * (NOP to enable, RET to disable).
 */
extern void lck_mtx_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_lockstat_patch_point(void);
extern void lck_mtx_try_lock_spin_lockstat_patch_point(void);
extern void lck_mtx_unlock_lockstat_patch_point(void);
extern void lck_mtx_lock_ext_lockstat_patch_point(void);
extern void lck_mtx_ext_unlock_lockstat_patch_point(void);
extern void lck_rw_lock_shared_lockstat_patch_point(void);
extern void lck_rw_lock_exclusive_lockstat_patch_point(void);
extern void lck_rw_lock_shared_to_exclusive_lockstat_patch_point(void);
extern void lck_rw_try_lock_shared_lockstat_patch_point(void);
extern void lck_rw_try_lock_exclusive_lockstat_patch_point(void);
extern void lck_mtx_lock_spin_lockstat_patch_point(void);
#endif

#endif /* CONFIG_DTRACE */

/*
 * Associates an LS_* probe index with the address of its assembly
 * hot-patch point.
 */
typedef struct lockstat_assembly_probe {
	int		lsap_probe;		/* LS_* probe index */
	vm_offset_t	*lsap_patch_point;	/* address of the patchable byte */
} lockstat_assembly_probe_t;
143
144
/*
 * Table mapping probes to their hot-patch points, terminated by
 * { LS_LCK_INVALID, NULL }.  Walked by lockstat_hot_patch().
 */
lockstat_assembly_probe_t assembly_probes[] =
{
#if CONFIG_DTRACE
#if defined(__x86_64__)
	/*
	 * On x86 these points are better done via hot patches, which ensure
	 * there is zero overhead when not in use. On x86 these patch points
	 * are swapped between the return instruction and a no-op, with the
	 * Dtrace call following the return.
	 */
	{ LS_LCK_MTX_LOCK_ACQUIRE,		(vm_offset_t *) lck_mtx_lock_lockstat_patch_point },
	{ LS_LCK_MTX_TRY_LOCK_ACQUIRE,		(vm_offset_t *) lck_mtx_try_lock_lockstat_patch_point },
	{ LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE,	(vm_offset_t *) lck_mtx_try_lock_spin_lockstat_patch_point },
	{ LS_LCK_MTX_UNLOCK_RELEASE,		(vm_offset_t *) lck_mtx_unlock_lockstat_patch_point },
	{ LS_LCK_MTX_EXT_LOCK_ACQUIRE,		(vm_offset_t *) lck_mtx_lock_ext_lockstat_patch_point },
	{ LS_LCK_MTX_EXT_UNLOCK_RELEASE,	(vm_offset_t *) lck_mtx_ext_unlock_lockstat_patch_point },
	{ LS_LCK_RW_LOCK_SHARED_ACQUIRE,	(vm_offset_t *) lck_rw_lock_shared_lockstat_patch_point },
	{ LS_LCK_RW_LOCK_EXCL_ACQUIRE,		(vm_offset_t *) lck_rw_lock_exclusive_lockstat_patch_point },
	{ LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,(vm_offset_t *) lck_rw_lock_shared_to_exclusive_lockstat_patch_point },
	{ LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,	(vm_offset_t *) lck_rw_try_lock_shared_lockstat_patch_point },
	{ LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,	(vm_offset_t *) lck_rw_try_lock_exclusive_lockstat_patch_point },
	{ LS_LCK_MTX_LOCK_SPIN_ACQUIRE,		(vm_offset_t *) lck_mtx_lock_spin_lockstat_patch_point },
#endif
	/* No assembly patch points for ARM */
#endif /* CONFIG_DTRACE */
	{ LS_LCK_INVALID, NULL }
};
172
173
174 /*
175 * APPLE NOTE:
176 * Hot patch is used to manipulate probe points by swapping between
177 * no-op and return instructions.
178 * The active flag indicates whether the probe point will turn on or off.
179 * on == plant a NOP and thus fall through to the probe call
180 * off == plant a RET and thus avoid the probe call completely
181 * The ls_probe identifies which probe we will patch.
182 */
183 static
184 void lockstat_hot_patch(boolean_t active, int ls_probe)
185 {
186 #pragma unused(active)
187 int i;
188
189 /*
190 * Loop through entire table, in case there are
191 * multiple patch points per probe.
192 */
193 for (i = 0; assembly_probes[i].lsap_patch_point; i++) {
194 if (ls_probe == assembly_probes[i].lsap_probe)
195 #if defined(__x86_64__)
196 {
197 uint8_t instr;
198 instr = (active ? NOP : RET );
199 (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i].lsap_patch_point),
200 sizeof(instr));
201 }
202 #endif
203 } /* for */
204 }
205
/*
 * Indirection for dtrace_probe(); installed by lockstat_attach() once
 * the provider is registered, and invoked from lockstat_probe_wrapper()
 * when a probe is enabled.
 */
void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);

209
210 /*
211 * APPLE NOTE:
212 * This wrapper is used only by assembler hot patched probes.
213 */
214 void
215 lockstat_probe_wrapper(int probe, uintptr_t lp, int rwflag)
216 {
217 dtrace_id_t id;
218 id = lockstat_probemap[probe];
219 if (id != 0)
220 {
221 (*lockstat_probe)(id, (uintptr_t)lp, (uint64_t)rwflag, 0,0,0);
222 }
223 }
224
static dev_info_t *lockstat_devi;	/* saved in xxattach() for xxinfo() */
static dtrace_provider_id_t lockstat_id;	/* provider id from dtrace_register() */
227
228 /*ARGSUSED*/
229 static int
230 lockstat_enable(void *arg, dtrace_id_t id, void *parg)
231 {
232 #pragma unused(arg) /* __APPLE__ */
233
234 lockstat_probe_t *probe = parg;
235
236 ASSERT(!lockstat_probemap[probe->lsp_probe]);
237
238 lockstat_probemap[probe->lsp_probe] = id;
239 membar_producer();
240
241 lockstat_hot_patch(TRUE, probe->lsp_probe);
242 membar_producer();
243 return(0);
244
245 }
246
247 /*ARGSUSED*/
248 static void
249 lockstat_disable(void *arg, dtrace_id_t id, void *parg)
250 {
251 #pragma unused(arg, id) /* __APPLE__ */
252
253 lockstat_probe_t *probe = parg;
254 int i;
255
256 ASSERT(lockstat_probemap[probe->lsp_probe]);
257
258 lockstat_probemap[probe->lsp_probe] = 0;
259 lockstat_hot_patch(FALSE, probe->lsp_probe);
260 membar_producer();
261
262 /*
263 * See if we have any probes left enabled.
264 */
265 for (i = 0; i < LS_NPROBES; i++) {
266 if (lockstat_probemap[i]) {
267 /*
268 * This probe is still enabled. We don't need to deal
269 * with waiting for all threads to be out of the
270 * lockstat critical sections; just return.
271 */
272 return;
273 }
274 }
275
276 }
277
278 /*ARGSUSED*/
279 static void
280 lockstat_provide(void *arg, const dtrace_probedesc_t *desc)
281 {
282 #pragma unused(arg, desc) /* __APPLE__ */
283
284 int i = 0;
285
286 for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
287 lockstat_probe_t *probe = &lockstat_probes[i];
288
289 if (dtrace_probe_lookup(lockstat_id, "mach_kernel",
290 probe->lsp_func, probe->lsp_name) != 0)
291 continue;
292
293 ASSERT(!probe->lsp_id);
294 probe->lsp_id = dtrace_probe_create(lockstat_id,
295 "mach_kernel", probe->lsp_func, probe->lsp_name,
296 LOCKSTAT_AFRAMES, probe);
297 }
298 }
299
300
301 /*ARGSUSED*/
302 static void
303 lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
304 {
305 #pragma unused(arg, id) /* __APPLE__ */
306
307 lockstat_probe_t *probe = parg;
308
309 ASSERT(!lockstat_probemap[probe->lsp_probe]);
310 probe->lsp_id = 0;
311 }
312
/*
 * Stability attributes for the provider.  Rows follow the standard
 * dtrace_pattr_t order: provider, module, function, name, args.
 */
static dtrace_pattr_t lockstat_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
320
/*
 * Provider operations vector; unimplemented operations are NULL.
 * Slot names follow the standard dtrace_pops_t layout — TODO confirm
 * against this tree's sys/dtrace.h.
 */
static dtrace_pops_t lockstat_pops = {
	lockstat_provide,
	NULL,			/* provide_module */
	lockstat_enable,
	lockstat_disable,
	NULL,			/* suspend */
	NULL,			/* resume */
	NULL,			/* getargdesc */
	NULL,			/* getargval */
	NULL,			/* usermode */
	lockstat_destroy
};
333
334 static int
335 lockstat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
336 {
337 switch (cmd) {
338 case DDI_ATTACH:
339 break;
340 case DDI_RESUME:
341 return (DDI_SUCCESS);
342 default:
343 return (DDI_FAILURE);
344 }
345
346 if (ddi_create_minor_node(devi, "lockstat", S_IFCHR, 0,
347 DDI_PSEUDO, 0) == DDI_FAILURE ||
348 dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_KERNEL,
349 NULL, &lockstat_pops, NULL, &lockstat_id) != 0) {
350 ddi_remove_minor_node(devi, NULL);
351 return (DDI_FAILURE);
352 }
353
354 lockstat_probe = dtrace_probe;
355 membar_producer();
356
357 ddi_report_dev(devi);
358 lockstat_devi = devi;
359 return (DDI_SUCCESS);
360 }
361
/* open(2) entry point for /dev/lockstat (installed in lockstat_cdevsw). */
d_open_t _lockstat_open;

/*
 * Open handler for /dev/lockstat.  No per-open state is needed; the
 * device node exists only so the provider has a home.  Always succeeds.
 */
int
_lockstat_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return (0);
}
369
#define LOCKSTAT_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.  The eno_* entries are stubs that reject the operation;
 * only open is meaningfully implemented.
 */
static struct cdevsw lockstat_cdevsw =
{
	_lockstat_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	eno_ioctl,		/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
393
static int gLockstatInited = 0;	/* set once by lockstat_init(); guards double init */

void lockstat_init( void );
397
398 void lockstat_init( void )
399 {
400 if (0 == gLockstatInited)
401 {
402 int majdevno = cdevsw_add(LOCKSTAT_MAJOR, &lockstat_cdevsw);
403
404 if (majdevno < 0) {
405 printf("lockstat_init: failed to allocate a major number!\n");
406 gLockstatInited = 0;
407 return;
408 }
409
410 lockstat_attach( (dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH );
411 gLockstatInited = 1;
412 } else
413 panic("lockstat_init: called twice!\n");
414 }
415 #undef LOCKSTAT_MAJOR