/*
 * Copyright (c) 2018 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _KERN_LOCKSTAT_H
#define _KERN_LOCKSTAT_H

#include <machine/locks.h>
#include <machine/atomic.h>
#include <kern/lock_group.h>

/*
 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
 * as a 64-bit quantity (the new x86-specific statistics are also maintained
 * as 32-bit quantities).
 *
 *
 * Enable this preprocessor define to record only the first miss.
 * By default, we count every miss, so multiple misses may be
 * recorded for a single lock acquire attempt via lck_mtx_lock().
 */
#undef LOG_FIRST_MISS_ALONE
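
/*
 * Illustrative sketch (hypothetical caller; lck_mtx_try_acquire() is a
 * made-up primitive): with LOG_FIRST_MISS_ALONE defined,
 * lck_grp_mtx_update_miss() below latches bit 0 of *first_miss, so a
 * retry loop such as
 *
 *	int first_miss = 0;
 *	while (!lck_mtx_try_acquire(mtx)) {
 *		lck_grp_mtx_update_miss(mtx, &first_miss);
 *	}
 *
 * bumps lgss_mtx_miss once per acquire attempt instead of once per
 * loop iteration.
 */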

/*
 * This preprocessor define controls whether the R-M-W updates of the
 * per-group statistics elements are atomic (LOCK-prefixed).
 */
#define ATOMIC_STAT_UPDATES 1
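
/*
 * Why this matters (sketch, assuming two CPUs updating one group): the
 * stat update is a read-modify-write on a shared counter, so a plain
 * "stat->lgs_count++" can lose increments:
 *
 *	CPU0: load  lgs_count (N)    CPU1: load  lgs_count (N)
 *	CPU0: store N + 1            CPU1: store N + 1    // N + 2 expected
 *
 * With ATOMIC_STAT_UPDATES, os_atomic_inc_orig() performs the R-M-W as
 * a single LOCK-prefixed instruction on x86, trading some throughput
 * for an exact count.
 */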

/*
 * DTrace lockstat probe definitions
 */
enum lockstat_probe_id {
	/* Spinlocks */
	LS_LCK_SPIN_LOCK_ACQUIRE,
	LS_LCK_SPIN_LOCK_SPIN,
	LS_LCK_SPIN_UNLOCK_RELEASE,

	/*
	 * Mutexes can also have interlock-spin events, which are
	 * unique to our lock implementation.
	 */
	LS_LCK_MTX_LOCK_ACQUIRE,
	LS_LCK_MTX_LOCK_BLOCK,
	LS_LCK_MTX_LOCK_SPIN_SPIN,
	LS_LCK_MTX_LOCK_ILK_SPIN,
	LS_LCK_MTX_TRY_LOCK_ACQUIRE,
	LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE,
	LS_LCK_MTX_UNLOCK_RELEASE,
	LS_LCK_MTX_LOCK_SPIN_ACQUIRE,

	/*
	 * Provide a parallel set for indirect mutexes.
	 */
	LS_LCK_MTX_EXT_LOCK_ACQUIRE,
	LS_LCK_MTX_EXT_LOCK_BLOCK,
	LS_LCK_MTX_EXT_LOCK_SPIN,
	LS_LCK_MTX_EXT_LOCK_ILK_SPIN,
	LS_LCK_MTX_EXT_UNLOCK_RELEASE,

	/*
	 * Reader-writer locks support a blocking upgrade primitive, as
	 * well as the possibility of spinning on the interlock.
	 */
	LS_LCK_RW_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_LOCK_SHARED_BLOCK,
	LS_LCK_RW_LOCK_SHARED_SPIN,

	LS_LCK_RW_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_LOCK_EXCL_BLOCK,
	LS_LCK_RW_LOCK_EXCL_SPIN,

	LS_LCK_RW_DONE_RELEASE,

	LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_SHARED_SPIN,

	LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN,

	LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK,

	LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE,
	LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN,

	/* Ticket lock */
	LS_LCK_TICKET_LOCK_ACQUIRE,
	LS_LCK_TICKET_LOCK_RELEASE,
	LS_LCK_TICKET_LOCK_SPIN,

	LS_NPROBES
};

#if CONFIG_DTRACE
extern uint32_t lockstat_probemap[LS_NPROBES];
extern void dtrace_probe(uint32_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);

/*
 * Macros to record lockstat probes.
 */
#define LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)          \
	{                                                            \
		uint32_t id;                                         \
		if (__improbable(id = lockstat_probemap[(probe)])) { \
			dtrace_probe(id, (uintptr_t)(lp), (arg0),    \
			    (arg1), (arg2), (arg3));                 \
		}                                                    \
	}
#define LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD__(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD(probe, lp, ...) LOCKSTAT_RECORD__(probe, lp, ##__VA_ARGS__, 0, 0, 0, 0)
#else
#define LOCKSTAT_RECORD()
#endif /* CONFIG_DTRACE */
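
/*
 * Usage sketch: the variadic wrappers pad missing trailing arguments
 * with zeroes, so both of the following (the inline helpers below are
 * the real call sites) expand to full LOCKSTAT_RECORD4() invocations:
 *
 *	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lock);
 *	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, spin_time, (uintptr_t)grp);
 *
 * The probe fires only when lockstat_probemap[probe] is non-zero, i.e.
 * when dtrace has enabled the corresponding lockstat probe.
 */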

#if CONFIG_DTRACE
/*
 * Time threshold before dtrace lockstat spin
 * probes are triggered.
 */
extern uint64_t dtrace_spin_threshold;

void lockprof_invoke(lck_grp_t*, lck_grp_stat_t*, uint64_t);
#endif /* CONFIG_DTRACE */

#if LOCK_STATS
static inline void
lck_grp_stat_enable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings++;
}

static inline void
lck_grp_stat_disable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings--;
}
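
/*
 * Pairing sketch (hypothetical consumer): lgs_enablings is a plain
 * enable refcount, so enables may nest as long as they are balanced:
 *
 *	lck_grp_stat_enable(&grp->lck_grp_stats.lgss_spin_held);
 *	...collect statistics...
 *	lck_grp_stat_disable(&grp->lck_grp_stats.lgss_spin_held);
 */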

#if MACH_KERNEL_PRIVATE
static inline void
lck_grp_inc_stats(lck_grp_t *grp, lck_grp_stat_t *stat)
{
	if (__improbable(stat->lgs_enablings)) {
#if ATOMIC_STAT_UPDATES
		uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed);
#else
		uint64_t val = stat->lgs_count++;
#endif /* ATOMIC_STAT_UPDATES */
#if CONFIG_DTRACE && LOCK_STATS
		if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) {
			lockprof_invoke(grp, stat, val);
		}
#endif /* CONFIG_DTRACE && LOCK_STATS */
	}
}
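
/*
 * Sampling sketch for lck_grp_inc_stats() above: lgs_limit turns
 * lockprof_invoke() into a 1-in-lgs_limit sampler. E.g. with
 * lgs_limit == 10000 (hypothetical value), the probe fires when the
 * pre-increment count val is 0, 10000, 20000, ..., since only then is
 * val % lgs_limit == 0.
 */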

static inline void
lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time)
{
	if (__improbable(stat->lgs_enablings)) {
		uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed);
#if CONFIG_DTRACE
		if (__improbable(stat->lgs_limit)) {
			while (__improbable(time > stat->lgs_limit)) {
				time -= stat->lgs_limit;
				lockprof_invoke(grp, stat, val);
			}
			if (__improbable(((val % stat->lgs_limit) + time) > stat->lgs_limit)) {
				lockprof_invoke(grp, stat, val);
			}
		}
#endif /* CONFIG_DTRACE */
	}
}

#endif /* MACH_KERNEL_PRIVATE */
#endif /* LOCK_STATS */
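
/*
 * Worked example for lck_grp_inc_time_stats() above (hypothetical
 * numbers): with lgs_limit == 100, a prior count with val % 100 == 70,
 * and time == 250, the while loop fires lockprof_invoke() twice
 * (time: 250 -> 150 -> 50) and the trailing check fires once more,
 * since (70 + 50) > 100. Net effect: roughly one invocation per
 * multiple of lgs_limit that the accumulated time crosses.
 */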

static inline void
lck_grp_spin_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_held;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_spin_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_miss;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_spin_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > dtrace_spin_threshold) {
		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_spin;
	lck_grp_inc_time_stats(grp, stat, time);
#endif /* LOCK_STATS */
}

static inline boolean_t
lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	boolean_t enabled = FALSE;
#if CONFIG_DTRACE
	enabled |= lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	enabled |= (grp && grp->lck_grp_stats.lgss_spin_spin.lgs_enablings);
#endif /* LOCK_STATS */
	return enabled;
}
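
/*
 * Usage sketch for the spin-lock helpers (hypothetical acquire path,
 * not xnu's actual implementation; try_lock() and cpu_pause() are
 * made-up primitives):
 *
 *	void
 *	example_spin_lock(void *lock LCK_GRP_ARG(lck_grp_t *grp))
 *	{
 *		if (try_lock(lock)) {
 *			lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
 *			return;
 *		}
 *		lck_grp_spin_update_miss(lock LCK_GRP_ARG(grp));
 *		boolean_t stats = lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp));
 *		uint64_t begin = stats ? mach_absolute_time() : 0;
 *		while (!try_lock(lock)) {
 *			cpu_pause();
 *		}
 *		if (stats) {
 *			lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp),
 *			    mach_absolute_time() - begin);
 *		}
 *		lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
 *	}
 *
 * Checking lck_grp_spin_spin_enabled() up front keeps the timestamp
 * reads off the fast path when neither dtrace nor per-group stats are
 * listening.
 */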

static inline void
lck_grp_ticket_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_held;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_ticket_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_miss;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline boolean_t
lck_grp_ticket_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	boolean_t enabled = FALSE;
#if CONFIG_DTRACE
	enabled |= lockstat_probemap[LS_LCK_TICKET_LOCK_SPIN] != 0;
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	enabled |= (grp && grp->lck_grp_stats.lgss_ticket_spin.lgs_enablings);
#endif /* LOCK_STATS */
	return enabled;
}

static inline void
lck_grp_ticket_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > dtrace_spin_threshold) {
		LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_spin;
	lck_grp_inc_time_stats(grp, stat, time);
#endif /* LOCK_STATS */
}

#if MACH_KERNEL_PRIVATE
static inline void
lck_grp_mtx_update_miss(
	struct _lck_mtx_ext_ *lock,
	int                  *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 1) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_miss;
	lck_grp_inc_stats(grp, stat);

#if LOG_FIRST_MISS_ALONE
	*first_miss |= 1;
	}
#endif /* LOG_FIRST_MISS_ALONE */
}

static inline void
lck_grp_mtx_update_direct_wait(
	struct _lck_mtx_ext_ *lock)
{
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_direct_wait;
	lck_grp_inc_stats(grp, stat);
}

static inline void
lck_grp_mtx_update_wait(
	struct _lck_mtx_ext_ *lock,
	int                  *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 2) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_wait;
	lck_grp_inc_stats(grp, stat);

#if LOG_FIRST_MISS_ALONE
	*first_miss |= 2;
	}
#endif /* LOG_FIRST_MISS_ALONE */
}

static inline void
lck_grp_mtx_update_held(
	struct _lck_mtx_ext_ *lock)
{
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_held;
	lck_grp_inc_stats(grp, stat);
}
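
/*
 * Usage sketch for the indirect-mutex helpers (hypothetical contended
 * path; mtx_try_acquire(), owner_is_running(), and mtx_block() are
 * made-up primitives). first_miss is a per-acquire cookie, zeroed by
 * the caller, that the helpers use as a bit latch (bit 0: miss already
 * recorded, bit 1: wait already recorded) when LOG_FIRST_MISS_ALONE is
 * defined:
 *
 *	int first_miss = 0;
 *	while (!mtx_try_acquire(lock)) {
 *		lck_grp_mtx_update_miss(lock, &first_miss);
 *		if (owner_is_running(lock)) {
 *			lck_grp_mtx_update_direct_wait(lock);
 *		} else {
 *			lck_grp_mtx_update_wait(lock, &first_miss);
 *			mtx_block(lock);
 *		}
 *	}
 *	lck_grp_mtx_update_held(lock);
 */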

#endif /* MACH_KERNEL_PRIVATE */
#endif /* _KERN_LOCKSTAT_H */