/*
 * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef KPERF_H
#define KPERF_H

#include <kern/thread.h>
#include <kern/locks.h>

extern lck_grp_t kperf_lck_grp;

/* the trigger types supported by kperf */
#define TRIGGER_TYPE_TIMER     (0)
#define TRIGGER_TYPE_PMI       (1)
#define TRIGGER_TYPE_KDEBUG    (2)
#define TRIGGER_TYPE_LAZY_WAIT (3)
#define TRIGGER_TYPE_LAZY_CPU  (3)
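/*
 * Illustrative sketch only -- `struct kperf_context`, `kperf_sample()`, and
 * the other names used below come from other kperf headers and are
 * assumptions here, not declarations in this file.  A trigger source records
 * its trigger type in the sample context before asking kperf to run the
 * configured action:
 *
 *     struct kperf_context ctx = {
 *         .cur_thread = current_thread(),
 *         .trigger_type = TRIGGER_TYPE_TIMER,
 *         .trigger_id = timerid,
 *     };
 *     (void)kperf_sample(sample_buf, &ctx, actionid, 0);
 */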

uint32_t kperf_get_thread_ast(thread_t thread);
void kperf_set_thread_ast(thread_t thread, uint32_t flags);

/*
 * Get and set a thread's dirty bit, so kperf can track whether the thread
 * has been dispatched since kperf last looked at it.
 */
boolean_t kperf_thread_get_dirty(thread_t thread);
void kperf_thread_set_dirty(thread_t thread, boolean_t dirty);
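/*
 * Example (an illustrative sketch, not code from kperf itself): a sampler
 * that only emits work for threads that have run since it last looked.
 *
 *     if (kperf_thread_get_dirty(thread)) {
 *         sample_thread(thread);                  // hypothetical consumer
 *         kperf_thread_set_dirty(thread, FALSE);  // mark the thread clean again
 *     }
 */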

/* possible states of kperf sampling */
#define KPERF_SAMPLING_OFF      (0)
#define KPERF_SAMPLING_ON       (1)
#define KPERF_SAMPLING_SHUTDOWN (2)

/*
 * Initialize kperf. Must be called before use and can be called multiple times.
 */
extern int kperf_init(void);

/* get and set sampling status */
extern unsigned kperf_sampling_status(void);
extern int kperf_sampling_enable(void);
extern int kperf_sampling_disable(void);
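/*
 * Example (an illustrative sketch): bringing sampling up from kernel code.
 * Error handling is elided; kperf_init() is assumed here to return 0 on
 * success.
 *
 *     if (kperf_init() == 0 &&
 *         kperf_sampling_status() == KPERF_SAMPLING_OFF) {
 *         (void)kperf_sampling_enable();
 *     }
 */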

/* get a per-CPU sample buffer */
struct kperf_sample *kperf_intr_sample_buffer(void);

/*
 * Callbacks into kperf from other systems.
 */

/*
 * kperf AST handler
 *
 * Prevent inlining, since the sampling function allocates on the stack and
 * branches calling ast_taken (but never on a kperf AST) may blow their stacks.
 */
extern __attribute__((noinline)) void kperf_thread_ast_handler(thread_t thread);
/* recompute whether the on-CPU callback needs to run */
void kperf_on_cpu_update(void);

/* for the scheduler switching a thread onto a CPU */
static inline void
kperf_on_cpu(thread_t thread, thread_continue_t continuation,
    uintptr_t *starting_fp)
{
	extern boolean_t kperf_on_cpu_active;
	void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
	    uintptr_t *starting_fp);

	if (__improbable(kperf_on_cpu_active)) {
		kperf_on_cpu_internal(thread, continuation, starting_fp);
	}
}

/* for the scheduler switching a thread off of a CPU */
static inline void
kperf_off_cpu(thread_t thread)
{
	extern unsigned int kperf_lazy_cpu_action;
	void kperf_lazy_off_cpu(thread_t thread);

	if (__improbable(kperf_lazy_cpu_action != 0)) {
		kperf_lazy_off_cpu(thread);
	}
}

/* for the scheduler making a thread runnable */
static inline void
kperf_make_runnable(thread_t thread, int interrupt)
{
	extern unsigned int kperf_lazy_cpu_action;
	void kperf_lazy_make_runnable(thread_t thread, bool interrupt);

	if (__improbable(kperf_lazy_cpu_action != 0)) {
		kperf_lazy_make_runnable(thread, interrupt);
	}
}

/* for the epilogue of an interrupt handler */
static inline void
kperf_interrupt(void)
{
	extern unsigned int kperf_lazy_cpu_action;
	extern void kperf_lazy_cpu_sample(thread_t thread, unsigned int flags,
	    bool interrupt);

	if (__improbable(kperf_lazy_cpu_action != 0)) {
		kperf_lazy_cpu_sample(current_thread(), 0, true);
	}
}
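/*
 * Illustrative sketch of the intended call sites for the inline hooks above
 * (the real callers live in the scheduler and interrupt paths; the placement
 * shown here is a summary, not a copy of that code):
 *
 *     kperf_on_cpu(thread, continuation, starting_fp); // thread switched onto this CPU
 *     kperf_off_cpu(thread);                           // thread switched off of this CPU
 *     kperf_make_runnable(thread, interrupt);          // thread woken up / made runnable
 *     kperf_interrupt();                               // end of an interrupt handler
 *
 * Each hook is a cheap inline that tests a global flag before calling into
 * kperf proper, so the fast path costs little when kperf is not sampling.
 */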

/* for kdebug to call on every traced event */
static inline void
kperf_kdebug_callback(uint32_t debugid, uintptr_t *starting_fp)
{
	extern boolean_t kperf_kdebug_active;
	void kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp);

	if (__improbable(kperf_kdebug_active)) {
		kperf_kdebug_handler(debugid, starting_fp);
	}
}
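/*
 * Illustrative call site (a sketch of how kdebug is expected to use this,
 * not copied from kdebug itself): pass the event's debugid and the current
 * frame pointer so kperf can walk the callstack from the tracepoint.
 *
 *     kperf_kdebug_callback(debugid, __builtin_frame_address(0));
 */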

/*
 * Used by ktrace to reset kperf. ktrace_lock must be held.
 */
extern void kperf_reset(void);

/*
 * Configure kperf from the kernel (e.g. during boot).
 */
void kperf_kernel_configure(const char *config);
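/*
 * Example (a sketch only -- the boot-arg name and parsing below are
 * assumptions for illustration, not taken from this header): a configuration
 * string supplied at boot can be handed to kperf_kernel_configure().
 *
 *     char config[64];
 *     if (PE_parse_boot_arg_str("kperf", config, sizeof(config))) {
 *         kperf_kernel_configure(config);
 *     }
 */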

/* given a task port name, find the pid it refers to */
int kperf_port_to_pid(mach_port_name_t portname);
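/*
 * Example (an illustrative sketch): translating a task port name received
 * from user space into a pid for filtering.  Treating a negative return
 * value as failure is an assumption of this sketch.
 *
 *     int pid = kperf_port_to_pid(portname);
 *     if (pid < 0) {
 *         return EINVAL;
 *     }
 */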

#if DEVELOPMENT || DEBUG
extern _Atomic long long kperf_pending_ipis;
#endif /* DEVELOPMENT || DEBUG */

#endif /* !defined(KPERF_H) */