/*
 * Copyright (c) 2018 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _KERN_LOCKSTAT_H
#define _KERN_LOCKSTAT_H
#include <machine/locks.h>
#include <machine/atomic.h>
#include <kern/lock_group.h>

/*
 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
 * as a 64-bit quantity (the new x86-specific statistics are maintained
 * as 32-bit quantities).
 *
 * Enable this preprocessor define to record only the first miss.
 * By default, we count every miss, so multiple misses may be
 * recorded for a single lock acquire attempt via lck_mtx_lock().
 */
#undef LOG_FIRST_MISS_ALONE

/*
 * This preprocessor define controls whether the R-M-W updates of the
 * per-group statistics elements are atomic (LOCK-prefixed).
 * Enabled by default.
 */
#define ATOMIC_STAT_UPDATES 1
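
/*
 * Illustrative sketch (assumption: for exposition only, not part of the
 * build).  With ATOMIC_STAT_UPDATES enabled, a per-group counter bump is a
 * LOCK-prefixed read-modify-write:
 *
 *	os_atomic_inc(stat, relaxed);	// atomic update
 *
 * with it disabled, the same bump is a plain, potentially racy increment:
 *
 *	(*stat)++;			// non-atomic update
 *
 * lck_grp_mtx_inc_stats() below selects between the two.
 */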

/*
 * DTrace lockstat probe definitions
 *
 * Spinlocks
 */
#define LS_LCK_SPIN_LOCK_ACQUIRE 0
#define LS_LCK_SPIN_LOCK_SPIN 1
#define LS_LCK_SPIN_UNLOCK_RELEASE 2

/*
 * Mutexes can also have interlock-spin events, which are
 * unique to our lock implementation.
 */
#define LS_LCK_MTX_LOCK_ACQUIRE 3
#define LS_LCK_MTX_LOCK_BLOCK 5
#define LS_LCK_MTX_LOCK_SPIN 6
#define LS_LCK_MTX_LOCK_ILK_SPIN 7
#define LS_LCK_MTX_TRY_LOCK_ACQUIRE 8
#define LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE 9
#define LS_LCK_MTX_UNLOCK_RELEASE 10

#define LS_LCK_MTX_LOCK_SPIN_ACQUIRE 39
/*
 * Provide a parallel set for indirect mutexes
 */
#define LS_LCK_MTX_EXT_LOCK_ACQUIRE 17
#define LS_LCK_MTX_EXT_LOCK_BLOCK 18
#define LS_LCK_MTX_EXT_LOCK_SPIN 19
#define LS_LCK_MTX_EXT_LOCK_ILK_SPIN 20
#define LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE 21
#define LS_LCK_MTX_EXT_UNLOCK_RELEASE 22

/*
 * Reader-writer locks support a blocking upgrade primitive, as
 * well as the possibility of spinning on the interlock.
 */
#define LS_LCK_RW_LOCK_SHARED_ACQUIRE 23
#define LS_LCK_RW_LOCK_SHARED_BLOCK 24
#define LS_LCK_RW_LOCK_SHARED_SPIN 25

#define LS_LCK_RW_LOCK_EXCL_ACQUIRE 26
#define LS_LCK_RW_LOCK_EXCL_BLOCK 27
#define LS_LCK_RW_LOCK_EXCL_SPIN 28

#define LS_LCK_RW_DONE_RELEASE 29

#define LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE 30
#define LS_LCK_RW_TRY_LOCK_SHARED_SPIN 31

#define LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE 32
#define LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN 33

#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE 34
#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN 35
#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK 36

#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE 37
#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN 38

#define LS_NPROBES 40
#define LS_LCK_INVALID LS_NPROBES

#if CONFIG_DTRACE
extern uint32_t lockstat_probemap[LS_NPROBES];
extern void (*lockstat_probe)(uint32_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
/*
 * Macros to record lockstat probes.
 */
#define LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)          \
	{                                                             \
		uint32_t id;                                          \
		if (__improbable(id = lockstat_probemap[(probe)])) { \
			(*lockstat_probe)(id, (uintptr_t)(lp), (arg0), \
			    (arg1), (arg2), (arg3));                  \
		}                                                     \
	}
#define LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD__(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD(probe, lp, ...) LOCKSTAT_RECORD__(probe, lp, ##__VA_ARGS__, 0, 0, 0, 0)
#else
#define LOCKSTAT_RECORD()
#endif /* CONFIG_DTRACE */
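
/*
 * Example usage (as in the inline helpers below): callers pass a probe id,
 * the lock address and up to four optional arguments; unused arguments are
 * padded with zeroes by LOCKSTAT_RECORD():
 *
 *	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock,
 *	    (uintptr_t)LCK_GRP_PROBEARG(grp));
 *
 * The probe body runs only when DTrace has set the corresponding slot in
 * lockstat_probemap[].
 */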

/*
 * Time threshold before dtrace lockstat spin
 * probes are triggered
 */
extern uint64_t dtrace_spin_threshold;

#if CONFIG_DTRACE
void lockprof_invoke(lck_grp_t*, lck_grp_stat_t*, uint64_t);
#endif /* CONFIG_DTRACE */

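/*
 * Take an enabling reference on a per-group statistic; a non-zero
 * lgs_enablings count is what makes the lck_grp_inc_*stats() helpers
 * below actually record events.
 */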
static inline void
lck_grp_stat_enable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings++;
}

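/*
 * Drop an enabling reference on a per-group statistic.
 */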
static inline void
lck_grp_stat_disable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings--;
}

#if MACH_KERNEL_PRIVATE
#if LOCK_STATS

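/*
 * Count one event against a per-group statistic, if enabled.  With DTrace
 * configured, the lockprof probe is invoked whenever the running count
 * crosses a multiple of lgs_limit.
 */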
static inline void
lck_grp_inc_stats(lck_grp_t *grp, lck_grp_stat_t *stat)
{
	if (__improbable(stat->lgs_enablings)) {
		uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed);
#if CONFIG_DTRACE
		if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) {
			lockprof_invoke(grp, stat, val);
		}
#else
#pragma unused(val)
#endif /* CONFIG_DTRACE */
	}
}

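/*
 * Accumulate an elapsed-time sample into a per-group statistic, if enabled.
 * With DTrace configured, the lockprof probe fires (possibly repeatedly)
 * roughly once per lgs_limit units of accumulated time.
 */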
static inline void
lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time)
{
	if (__improbable(stat->lgs_enablings)) {
		uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed);
#if CONFIG_DTRACE
		if (__improbable(stat->lgs_limit)) {
			while (__improbable(time > stat->lgs_limit)) {
				time -= stat->lgs_limit;
				lockprof_invoke(grp, stat, val);
			}
			if (__improbable(((val % stat->lgs_limit) + time) > stat->lgs_limit)) {
				lockprof_invoke(grp, stat, val);
			}
		}
#else
#pragma unused(val)
#endif /* CONFIG_DTRACE */
	}
}

#endif /* LOCK_STATS */

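/*
 * Record a spinlock acquisition: fire the DTrace acquire probe and, when
 * per-group statistics are enabled, bump the group's spin-held counter.
 */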
static inline void
lck_grp_spin_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_held;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

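/*
 * Record a spinlock acquisition that did not succeed immediately by
 * bumping the group's spin-miss counter (LOCK_STATS only).
 */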
static inline void
lck_grp_spin_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_miss;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

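/*
 * Record time spent spinning on a spinlock: fire the DTrace spin probe
 * once the spin exceeds dtrace_spin_threshold, and add the time to the
 * group's spin-time statistic.
 */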
static inline void
lck_grp_spin_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > dtrace_spin_threshold) {
		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_spin;
	lck_grp_inc_time_stats(grp, stat, time);
#endif /* LOCK_STATS */
}

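/*
 * Return TRUE if spin-time tracking is currently worthwhile, i.e. the
 * DTrace spin probe or the group's spin-time statistic is enabled.
 */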
static inline boolean_t
lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	boolean_t enabled = FALSE;
#if CONFIG_DTRACE
	enabled |= lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	enabled |= (grp && grp->lck_grp_stats.lgss_spin_spin.lgs_enablings);
#endif /* LOCK_STATS */
	return enabled;
}

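/*
 * Bump a per-group mutex statistic, atomically when ATOMIC_STAT_UPDATES
 * is configured.
 */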
static void inline
lck_grp_mtx_inc_stats(
	uint64_t* stat)
{
#if ATOMIC_STAT_UPDATES
	os_atomic_inc(stat, relaxed);
#else
	(*stat)++;
#endif /* ATOMIC_STAT_UPDATES */
}

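/*
 * Count a missed (contended) acquisition of an indirect mutex.  With
 * LOG_FIRST_MISS_ALONE, only the first miss of an acquire attempt is
 * recorded.
 */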
static void inline
lck_grp_mtx_update_miss(
	struct _lck_mtx_ext_ *lock,
	int *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 1) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_miss.lgs_count;
	lck_grp_mtx_inc_stats(stat);

#if LOG_FIRST_MISS_ALONE
	*first_miss |= 1;
}
#endif /* LOG_FIRST_MISS_ALONE */
}

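/*
 * Bump the direct-wait counter of an indirect mutex's group.
 */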
static void inline
lck_grp_mtx_update_direct_wait(
	struct _lck_mtx_ext_ *lock)
{
	uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_direct_wait.lgs_count;
	lck_grp_mtx_inc_stats(stat);
}

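/*
 * Count a blocking wait on an indirect mutex.  With LOG_FIRST_MISS_ALONE,
 * only the first wait of an acquire attempt is recorded.
 */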
static void inline
lck_grp_mtx_update_wait(
	struct _lck_mtx_ext_ *lock,
	int *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 2) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_wait.lgs_count;
	lck_grp_mtx_inc_stats(stat);

#if LOG_FIRST_MISS_ALONE
	*first_miss |= 2;
}
#endif /* LOG_FIRST_MISS_ALONE */
}

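/*
 * Bump the held counter of an indirect mutex's group on acquisition.
 */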
static void inline
lck_grp_mtx_update_held(
	struct _lck_mtx_ext_ *lock)
{
	uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_held.lgs_count;
	lck_grp_mtx_inc_stats(stat);
}
#endif /* MACH_KERNEL_PRIVATE */
#endif /* _KERN_LOCKSTAT_H */