/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/ioctl.h>
#include <miscfs/devfs/devfs.h>
#include <sys/systm.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <kern/lock_group.h>
#include <kern/lock_stat.h>
#if LOCK_STATS

#define LP_NODE "lockprof"

#define LOCKPROF_AFRAMES 3
#define LOCKPROF_LEN 64
static dtrace_provider_id_t lockprof_id;

decl_lck_mtx_data(extern, lck_grp_lock);
extern queue_head_t lck_grp_queue;
extern unsigned int lck_grp_cnt;
extern void lck_grp_reference(lck_grp_t *grp);
extern void lck_grp_deallocate(lck_grp_t *grp);
#define LOCKPROF_MAX 10000      /* maximum number of lockprof probes */
static uint32_t lockprof_count; /* current number of lockprof probes */
enum probe_flags {
	/*
	 * Counts time spent spinning/blocking
	 */
	TIME_EVENT = 0x01,

	/*
	 * Requires LCK_GRP_ATTR_STAT to be set on the lock
	 * group, either via lck_grp_attr_setstat on the lock group,
	 * or globally via the lcks=3 boot-arg
	 */
	STAT_NEEDED = 0x02
};
static const struct {
	const char *prefix;
	int flags;
	size_t count_offset;
	size_t stat_offset;
} probes[] = {
	{"spin-held-", 0, offsetof(lck_grp_t, lck_grp_spincnt), offsetof(lck_grp_stats_t, lgss_spin_held)},
	{"spin-miss-", 0, offsetof(lck_grp_t, lck_grp_spincnt), offsetof(lck_grp_stats_t, lgss_spin_miss)},
	{"spin-spin-", TIME_EVENT, offsetof(lck_grp_t, lck_grp_spincnt), offsetof(lck_grp_stats_t, lgss_spin_spin)},
	{"ticket-held-", 0, offsetof(lck_grp_t, lck_grp_ticketcnt), offsetof(lck_grp_stats_t, lgss_ticket_held)},
	{"ticket-miss-", 0, offsetof(lck_grp_t, lck_grp_ticketcnt), offsetof(lck_grp_stats_t, lgss_ticket_miss)},
	{"ticket-spin-", TIME_EVENT, offsetof(lck_grp_t, lck_grp_ticketcnt), offsetof(lck_grp_stats_t, lgss_ticket_spin)},
#if HAS_EXT_MUTEXES
	{"adaptive-held-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_held)},
	{"adaptive-miss-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_miss)},
	{"adaptive-wait-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_wait)},
	{"adaptive-direct-wait-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_direct_wait)},
#endif /* HAS_EXT_MUTEXES */
	{NULL, 0, 0, 0}
};
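
/*
 * Each entry above ties a probe-name prefix to the per-group lock count
 * (count_offset) and the statistic (stat_offset) it instruments.
 * probe_create() appends the numeric threshold, plus a time-unit suffix for
 * TIME_EVENT probes, so the resulting probe names look like
 * "spin-held-10000" or "spin-spin-100ms", published under the lock group's
 * name as the module.
 */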
/*
 * Default defined probes for counting events
 */
const static int hold_defaults[] = {
	10000 /* 10000 events */
};
/*
 * Default defined probes for time events
 */
const static struct {
	unsigned int time;
	const char *suffix;
	uint64_t mult;
} cont_defaults[] = {
	{100, "ms", NANOSEC / MILLISEC} /* 100 ms */
};
typedef struct lockprof_probe {
	int lockprof_kind;
	dtrace_id_t lockprof_id;
	uint64_t lockprof_limit;
	lck_grp_t *lockprof_grp;
} lockprof_probe_t;
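
/*
 * Fires the DTrace probe registered for a lock-group statistic.  The
 * lock-stats machinery is expected to call this once an enabled stat
 * reaches its limit (see lgs_limit/lgs_probeid below); arg0 is the
 * lck_grp_t and arg1 is the event value (a count, or an absolute-time
 * duration for TIME_EVENT probes).
 */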
void
lockprof_invoke(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t val)
{
	dtrace_probe(stat->lgs_probeid, (uintptr_t)grp, val, 0, 0, 0);
}
static int
lockprof_lock_count(lck_grp_t *grp, int kind)
{
	return *(int*)((void*)(grp) + probes[kind].count_offset);
}
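
/*
 * Creates a probe named <prefix><count><suffix> for every lock group that
 * matches grp_name (or for all groups when grp_name is NULL or empty),
 * skipping groups that already have the probe, groups with no locks of the
 * relevant kind, and groups that lack statistics when the probe requires
 * them.
 */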
static void
probe_create(int kind, const char *suffix, const char *grp_name, uint64_t count, uint64_t mult)
{
	char name[LOCKPROF_LEN];
	lck_mtx_lock(&lck_grp_lock);
	lck_grp_t *grp = (lck_grp_t*)queue_first(&lck_grp_queue);
	uint64_t limit = count * mult;

	if (probes[kind].flags & TIME_EVENT) {
		nanoseconds_to_absolutetime(limit, &limit);
	}

	for (unsigned int i = 0; i < lck_grp_cnt; i++, grp = (lck_grp_t*)queue_next((queue_entry_t)grp)) {
		if (!grp_name || grp_name[0] == '\0' || strcmp(grp_name, grp->lck_grp_name) == 0) {
			snprintf(name, sizeof(name), "%s%llu%s", probes[kind].prefix, count, suffix ?: "");

			/* Skip the group if this probe already exists */
			if (dtrace_probe_lookup(lockprof_id, grp->lck_grp_name, NULL, name) != 0) {
				continue;
			}
			/* Skip groups with no locks of this kind */
			if (lockprof_lock_count(grp, kind) == 0) {
				continue;
			}
			/* Skip groups without statistics when the probe needs them */
			if ((probes[kind].flags & STAT_NEEDED) && !(grp->lck_grp_attr & LCK_GRP_ATTR_STAT)) {
				continue;
			}
			if (lockprof_count >= LOCKPROF_MAX) {
				break;
			}

			lockprof_probe_t *probe = kmem_zalloc(sizeof(lockprof_probe_t), KM_SLEEP);
			probe->lockprof_kind = kind;
			probe->lockprof_limit = limit;
			probe->lockprof_grp = grp;

			lck_grp_reference(grp);

			probe->lockprof_id = dtrace_probe_create(lockprof_id, grp->lck_grp_name, NULL, name,
			    LOCKPROF_AFRAMES, probe);
			lockprof_count++;
		}
	}
	lck_mtx_unlock(&lck_grp_lock);
}
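
/*
 * dtps_provide entry point.  With a NULL description the default probes are
 * offered for every lock group (an event-count threshold of 10000 and a
 * 100 ms time threshold).  Otherwise the requested probe name is parsed as
 * <prefix><value>[suffix] and a matching probe is created on demand, with
 * the module field selecting the lock group.  For example (the group name
 * here is purely illustrative):
 *
 *	dtrace -n 'lockprof:some_lock_group::spin-spin-500ms { stack(); }'
 *
 * would request a probe tied to 500 ms of spin time on locks in that group.
 */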
static void
lockprof_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg)
	size_t event_id, i, j, len;

	if (desc == NULL) {
		for (i = 0; i < sizeof(hold_defaults) / sizeof(hold_defaults[0]); i++) {
			for (j = 0; probes[j].prefix != NULL; j++) {
				if (!(probes[j].flags & TIME_EVENT)) {
					probe_create(j, NULL, NULL, hold_defaults[i], 1);
				}
			}
		}

		for (i = 0; i < sizeof(cont_defaults) / sizeof(cont_defaults[0]); i++) {
			for (j = 0; probes[j].prefix != NULL; j++) {
				if (probes[j].flags & TIME_EVENT) {
					probe_create(j, cont_defaults[i].suffix, NULL, cont_defaults[i].time, cont_defaults[i].mult);
				}
			}
		}
		return;
	}

	const char *name, *suffix = NULL;
	hrtime_t val = 0, mult = 1;

	const struct {
		const char *name;
		hrtime_t mult;
	} suffixes[] = {
		{ "us", NANOSEC / MICROSEC },
		{ "usec", NANOSEC / MICROSEC },
		{ "ms", NANOSEC / MILLISEC },
		{ "msec", NANOSEC / MILLISEC },
		{ "s", NANOSEC / SEC },
		{ "sec", NANOSEC / SEC },
		{ NULL, 0 }
	};

	name = desc->dtpd_name;

	/* Match the probe-name prefix against the known event kinds */
	for (event_id = 0; probes[event_id].prefix != NULL; event_id++) {
		len = strlen(probes[event_id].prefix);

		if (strncmp(name, probes[event_id].prefix, len) != 0) {
			continue;
		}
		break;
	}

	if (probes[event_id].prefix == NULL) {
		return;
	}

	/*
	 * The rest of the probe name is a numeric threshold with an optional
	 * time suffix.  We need to start before any time suffix.
	 */
	for (i = strlen(name); i >= len; i--) {
		if (name[i] >= '0' && name[i] <= '9') {
			break;
		}
		suffix = &name[i];
	}

	/*
	 * Now determine the numerical value present in the probe name.
	 */
	for (uint64_t m = 1; i >= len; i--) {
		if (name[i] < '0' || name[i] > '9') {
			return;
		}
		val += (name[i] - '0') * m;
		m *= 10;
	}

	if (probes[event_id].flags & TIME_EVENT) {
		/* Time probes require a recognized time-unit suffix */
		for (i = 0, mult = 0; suffixes[i].name != NULL; i++) {
			if (strncasecmp(suffixes[i].name, suffix, strlen(suffixes[i].name) + 1) == 0) {
				mult = suffixes[i].mult;
				break;
			}
		}
		if (suffixes[i].name == NULL) {
			return;
		}
	} else if (*suffix != '\0') {
		/* Count probes must not carry a suffix */
		return;
	}

	probe_create(event_id, suffix, desc->dtpd_mod, val, mult);
}
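
/*
 * Returns the lck_grp_stat_t within the group's statistics block that
 * corresponds to the given probe kind.
 */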
static lck_grp_stat_t *
lockprof_stat(lck_grp_t *grp, int kind)
{
	return (lck_grp_stat_t*)((void*)&grp->lck_grp_stats + probes[kind].stat_offset);
}
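
/*
 * Enabling a probe publishes its limit and probe id into the group's stat;
 * disabling clears them again.  Only one enabling per stat is honored at a
 * time: a non-zero lgs_limit means the stat is already claimed.
 */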
static int
lockprof_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id, parg)
	lockprof_probe_t *probe = (lockprof_probe_t*)parg;
	lck_grp_t *grp = probe->lockprof_grp;
	lck_grp_stat_t *stat;

	if ((stat = lockprof_stat(grp, probe->lockprof_kind)) == NULL) {
		return -1;
	}

	/*
	 * lockprof_enable/disable are called with
	 * dtrace_lock held, so the stat fields can be
	 * updated without further locking.
	 */
	if (stat->lgs_limit != 0) {
		return -1;
	}

	stat->lgs_limit = probe->lockprof_limit;
	stat->lgs_enablings++;
	stat->lgs_probeid = probe->lockprof_id;

	return 0;
}
static void
lockprof_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	lockprof_probe_t *probe = (lockprof_probe_t*)parg;
	lck_grp_t *grp = probe->lockprof_grp;
	lck_grp_stat_t *stat;

	if ((stat = lockprof_stat(grp, probe->lockprof_kind)) == NULL) {
		return;
	}

	if (stat->lgs_limit == 0 || stat->lgs_enablings == 0) {
		return;
	}

	stat->lgs_limit = 0;
	stat->lgs_enablings--;
	stat->lgs_probeid = 0;
}
static void
lockprof_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	lockprof_probe_t *probe = (lockprof_probe_t*)parg;
	lck_grp_deallocate(probe->lockprof_grp);
	kmem_free(probe, sizeof(lockprof_probe_t));
	lockprof_count--;
}
static void
lockprof_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
{
#pragma unused(arg, id, parg)
	const char *argdesc = NULL;

	switch (desc->dtargd_ndx) {
	case 0:
		argdesc = "lck_grp_t*";
		break;
	case 1:
		argdesc = "uint64_t";
		break;
	}

	if (argdesc) {
		strlcpy(desc->dtargd_native, argdesc, DTRACE_ARGTYPELEN);
	} else {
		desc->dtargd_ndx = DTRACE_ARGNONE;
	}
}
static dtrace_pattr_t lockprof_attr = {
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
static dtrace_pops_t lockprof_pops = {
	.dtps_provide =         lockprof_provide,
	.dtps_provide_module =  NULL,
	.dtps_enable =          lockprof_enable,
	.dtps_disable =         lockprof_disable,
	.dtps_suspend =         NULL,
	.dtps_resume =          NULL,
	.dtps_getargdesc =      lockprof_getargdesc,
	.dtps_getargval =       NULL,
	.dtps_usermode =        NULL,
	.dtps_destroy =         lockprof_destroy
};
static int
_lockprof_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return 0;
}
static const struct cdevsw lockprof_cdevsw =
{
	.d_open = _lockprof_open,
	.d_write = eno_rdwrt,
	.d_ioctl = eno_ioctl,
	.d_stop = (stop_fcn_t *)nulldev,
	.d_reset = (reset_fcn_t *)nulldev,
	.d_select = eno_select,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
};
#endif /* LOCK_STATS */

void lockprof_init(void);
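
/*
 * Registers the lockprof provider with DTrace and creates the /dev/lockprof
 * node; the body compiles to a no-op when LOCK_STATS is not configured.
 */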
void
lockprof_init(void)
{
#if LOCK_STATS
	int majorno = cdevsw_add(-1, &lockprof_cdevsw);

	if (majorno < 0) {
		panic("dtrace: failed to allocate a major number");
		return;
	}

	if (dtrace_register(LP_NODE, &lockprof_attr, DTRACE_PRIV_KERNEL,
	    NULL, &lockprof_pops, NULL, &lockprof_id) != 0) {
		panic("dtrace: failed to register lockprof provider");
	}

	dev_t dev = makedev(majorno, 0);

	if (devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    LP_NODE, 0 ) == NULL) {
		panic("dtrace: devfs_make_node failed for lockprof");
	}
#endif /* LOCK_STATS */
}