/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _KERN_LOCKSTAT_H
#define _KERN_LOCKSTAT_H
#include <machine/locks.h>
#include <machine/atomic.h>
#include <kern/lock_group.h>

/*
 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
 * as a 64-bit quantity (the newer x86-specific statistics are maintained
 * as 32-bit quantities).
 *
 * Enable this preprocessor define to record the first miss alone.
 * By default, we count every miss, hence multiple misses may be
 * recorded for a single lock acquire attempt via lck_mtx_lock().
 */
#undef LOG_FIRST_MISS_ALONE
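/*
 * Illustrative effect (names taken from the inline functions later in this
 * file): with LOG_FIRST_MISS_ALONE defined, the caller-supplied flag word
 * limits each statistic to one increment per acquire attempt:
 *
 *	int first_miss = 0;
 *	lck_grp_mtx_update_miss(lock, &first_miss);  // counted; sets bit 0
 *	lck_grp_mtx_update_miss(lock, &first_miss);  // skipped; bit 0 already set
 */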

/*
 * This preprocessor define controls whether the R-M-W updates of the
 * per-group statistics elements are atomic (LOCK-prefixed).
 * Enabled by default.
 */
#define ATOMIC_STAT_UPDATES 1
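/*
 * Sketch of the two update flavors (comment only; the real selection is
 * the #if in lck_grp_inc_stats() below):
 *
 *	uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed); // atomic R-M-W
 *	uint64_t val = stat->lgs_count++;  // plain load/add/store; racing CPUs may drop counts
 */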

/*
 * DTrace lockstat probe definitions
 */

enum lockstat_probe_id {
	/* Spinlocks */
	LS_LCK_SPIN_LOCK_ACQUIRE,
	LS_LCK_SPIN_LOCK_SPIN,
	LS_LCK_SPIN_UNLOCK_RELEASE,

	/*
	 * Mutexes can also have interlock-spin events, which are
	 * unique to our lock implementation.
	 */
	LS_LCK_MTX_LOCK_ACQUIRE,
	LS_LCK_MTX_LOCK_BLOCK,
	LS_LCK_MTX_LOCK_SPIN,
	LS_LCK_MTX_LOCK_ILK_SPIN,
	LS_LCK_MTX_TRY_LOCK_ACQUIRE,
	LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE,
	LS_LCK_MTX_UNLOCK_RELEASE,
	LS_LCK_MTX_LOCK_SPIN_ACQUIRE,

	/*
	 * Provide a parallel set for indirect mutexes.
	 */
	LS_LCK_MTX_EXT_LOCK_ACQUIRE,
	LS_LCK_MTX_EXT_LOCK_BLOCK,
	LS_LCK_MTX_EXT_LOCK_SPIN,
	LS_LCK_MTX_EXT_LOCK_ILK_SPIN,
	LS_LCK_MTX_EXT_UNLOCK_RELEASE,

	/*
	 * Reader-writer locks support a blocking upgrade primitive, as
	 * well as the possibility of spinning on the interlock.
	 */
	LS_LCK_RW_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_LOCK_SHARED_BLOCK,
	LS_LCK_RW_LOCK_SHARED_SPIN,

	LS_LCK_RW_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_LOCK_EXCL_BLOCK,
	LS_LCK_RW_LOCK_EXCL_SPIN,

	LS_LCK_RW_DONE_RELEASE,

	LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_SHARED_SPIN,

	LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN,

	LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK,

	LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE,
	LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN,

	/* Ticket lock */
	LS_LCK_TICKET_LOCK_ACQUIRE,
	LS_LCK_TICKET_LOCK_RELEASE,
	LS_LCK_TICKET_LOCK_SPIN,

	LS_NPROBES
};

#if CONFIG_DTRACE
extern uint32_t lockstat_probemap[LS_NPROBES];
extern void dtrace_probe(uint32_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
/*
 * Macros to record lockstat probes.
 */
#define LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)           \
	{                                                             \
		uint32_t id;                                          \
		if (__improbable(id = lockstat_probemap[(probe)])) {  \
			dtrace_probe(id, (uintptr_t)(lp), (arg0),    \
			    (arg1), (arg2), (arg3));                 \
		}                                                     \
	}
#define LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD__(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD(probe, lp, ...) LOCKSTAT_RECORD__(probe, lp, ##__VA_ARGS__, 0, 0, 0, 0)
#else
#define LOCKSTAT_RECORD()
#endif /* CONFIG_DTRACE */
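/*
 * Illustrative call sites (hypothetical; this header only defines the
 * macros): the variadic wrappers above zero-pad any trailing arguments the
 * caller omits, so up to four payload arguments may follow the lock address:
 *
 *	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck);
 *	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck, sleep_time);
 */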

/*
 * Time threshold before dtrace lockstat spin
 * probes are triggered
 */
extern uint64_t dtrace_spin_threshold;

#if CONFIG_DTRACE
void lockprof_invoke(lck_grp_t*, lck_grp_stat_t*, uint64_t);
#endif /* CONFIG_DTRACE */

static inline void
lck_grp_stat_enable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings++;
}

static inline void
lck_grp_stat_disable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings--;
}
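/*
 * Note (inferred from usage in this file): lgs_enablings acts as a
 * reference count of active enablings; lck_grp_inc_stats() and
 * lck_grp_inc_time_stats() below record only while it is non-zero.
 */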

#if MACH_KERNEL_PRIVATE
static inline void
lck_grp_inc_stats(lck_grp_t *grp, lck_grp_stat_t *stat)
{
#pragma unused(grp)
	if (__improbable(stat->lgs_enablings)) {
#if ATOMIC_STAT_UPDATES
		uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed);
#else
		uint64_t val = stat->lgs_count++;
#endif /* ATOMIC_STAT_UPDATES */
#if CONFIG_DTRACE && LOCK_STATS
		if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) {
			lockprof_invoke(grp, stat, val);
		}
#else
#pragma unused(val)
#endif /* CONFIG_DTRACE && LOCK_STATS */
	}
}

#if LOCK_STATS
static inline void
lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time)
{
	if (__improbable(stat->lgs_enablings)) {
		uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed);
#if CONFIG_DTRACE
		if (__improbable(stat->lgs_limit)) {
			while (__improbable(time > stat->lgs_limit)) {
				time -= stat->lgs_limit;
				lockprof_invoke(grp, stat, val);
			}
			if (__improbable(((val % stat->lgs_limit) + time) > stat->lgs_limit)) {
				lockprof_invoke(grp, stat, val);
			}
		}
#else
#pragma unused(val)
#endif /* CONFIG_DTRACE */
	}
}

#endif /* LOCK_STATS */
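/*
 * Note (as read from lck_grp_inc_time_stats() above): when lgs_limit is
 * set, the probe fires once per lgs_limit units of accumulated time, so a
 * single long sample can invoke lockprof_invoke() several times.
 */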

static inline void
lck_grp_spin_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_held;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_spin_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_miss;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_spin_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > dtrace_spin_threshold) {
		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_spin;
	lck_grp_inc_time_stats(grp, stat, time);
#endif /* LOCK_STATS */
}

static inline boolean_t
lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	boolean_t enabled = FALSE;
#if CONFIG_DTRACE
	enabled |= lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	enabled |= (grp && grp->lck_grp_stats.lgss_spin_spin.lgs_enablings);
#endif /* LOCK_STATS */
	return enabled;
}
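/*
 * Illustrative caller pattern (hypothetical; the real call sites live in
 * the spinlock implementation): sample the timebase only while a consumer
 * is enabled, then report the spin duration on the contended path.
 *
 *	uint64_t begin = 0;
 *	boolean_t stat_enabled = lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp));
 *	if (stat_enabled) {
 *		begin = mach_absolute_time();
 *	}
 *	... spin until the lock is acquired ...
 *	if (stat_enabled) {
 *		lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp),
 *		    mach_absolute_time() - begin);
 *	}
 */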

static inline void
lck_grp_ticket_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_held;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_ticket_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_miss;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline boolean_t
lck_grp_ticket_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	boolean_t enabled = FALSE;
#if CONFIG_DTRACE
	enabled |= lockstat_probemap[LS_LCK_TICKET_LOCK_SPIN] != 0;
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	enabled |= (grp && grp->lck_grp_stats.lgss_ticket_spin.lgs_enablings);
#endif /* LOCK_STATS */
	return enabled;
}

static inline void
lck_grp_ticket_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > dtrace_spin_threshold) {
		LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_spin;
	lck_grp_inc_time_stats(grp, stat, time);
#endif /* LOCK_STATS */
}


static void inline
lck_grp_mtx_update_miss(
	struct _lck_mtx_ext_ *lock,
	int *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 1) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_miss;
	lck_grp_inc_stats(grp, stat);

#if LOG_FIRST_MISS_ALONE
	*first_miss |= 1;
}
#endif /* LOG_FIRST_MISS_ALONE */
}

static void inline
lck_grp_mtx_update_direct_wait(
	struct _lck_mtx_ext_ *lock)
{
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_direct_wait;
	lck_grp_inc_stats(grp, stat);
}

static void inline
lck_grp_mtx_update_wait(
	struct _lck_mtx_ext_ *lock,
	int *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 2) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_wait;
	lck_grp_inc_stats(grp, stat);
#if LOG_FIRST_MISS_ALONE
	*first_miss |= 2;
}
#endif /* LOG_FIRST_MISS_ALONE */
}

static void inline
lck_grp_mtx_update_held(
	struct _lck_mtx_ext_ *lock)
{
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_held;
	lck_grp_inc_stats(grp, stat);
}

#endif /* MACH_KERNEL_PRIVATE */
#endif /* _KERN_LOCKSTAT_H */