/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef KERN_KPC_H
#define KERN_KPC_H

/* Kernel interfaces to KPC PMC infrastructure. */

#include <machine/machine_kpc.h>
#include <kern/thread.h> /* thread_* */

__BEGIN_DECLS

/* cross-platform class constants */
#define KPC_CLASS_FIXED         (0)
#define KPC_CLASS_CONFIGURABLE  (1)
#define KPC_CLASS_POWER         (2)
#define KPC_CLASS_RAWPMU        (3)

#define KPC_CLASS_FIXED_MASK         (1u << KPC_CLASS_FIXED)
#define KPC_CLASS_CONFIGURABLE_MASK  (1u << KPC_CLASS_CONFIGURABLE)
#define KPC_CLASS_POWER_MASK         (1u << KPC_CLASS_POWER)
#define KPC_CLASS_RAWPMU_MASK        (1u << KPC_CLASS_RAWPMU)

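/*
 * Example: a `classes` argument for the interfaces declared below is built by
 * OR-ing the *_MASK constants together. A minimal sketch of the assumed usage
 * pattern, not a declaration from this header:
 *
 * @code
 * uint32_t classes = KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
 * uint32_t ncounters = kpc_get_counter_count(classes);
 * uint32_t nconfigs = kpc_get_config_count(classes);
 * @endcode
 */
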
#define KPC_PMU_ERROR     (0)
#define KPC_PMU_INTEL_V3  (1)
#define KPC_PMU_ARM_APPLE (2)
#define KPC_PMU_INTEL_V2  (3)
#define KPC_PMU_ARM_V2    (4)

#define KPC_ALL_CPUS (1u << 31)

/* action id setters/getters */
#define FIXED_ACTIONID(ctr)        (kpc_actionid[(ctr)])
#define CONFIGURABLE_ACTIONID(ctr) (kpc_actionid[(ctr) + kpc_fixed_count()])

/* reload counter setters/getters */
#define FIXED_RELOAD(ctr)                 (current_cpu_datap()->cpu_kpc_reload[(ctr)])
#define FIXED_RELOAD_CPU(cpu, ctr)        (cpu_datap(cpu)->cpu_kpc_reload[(ctr)])
#define CONFIGURABLE_RELOAD(ctr)          (current_cpu_datap()->cpu_kpc_reload[(ctr) + kpc_fixed_count()])
#define CONFIGURABLE_RELOAD_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_reload[(ctr) + kpc_fixed_count()])

/* shadow counter setters/getters */
#define FIXED_SHADOW(ctr)                 (current_cpu_datap()->cpu_kpc_shadow[(ctr)])
#define FIXED_SHADOW_CPU(cpu, ctr)        (cpu_datap(cpu)->cpu_kpc_shadow[(ctr)])
#define CONFIGURABLE_SHADOW(ctr)          (current_cpu_datap()->cpu_kpc_shadow[(ctr) + kpc_fixed_count()])
#define CONFIGURABLE_SHADOW_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_shadow[(ctr) + kpc_fixed_count()])

/**
 * Callback for notification when PMCs are acquired or released by a task. The
 * argument is TRUE if the Power Manager (PM) can use its reserved PMCs, and
 * FALSE otherwise.
 */
typedef void (*kpc_pm_handler_t)(boolean_t);

/*
 * Register a CPU with kpc and allocate its buffers.
 *
 * @param cpu_data
 * CPU data associated with the CPU being registered.
 *
 * @return
 * TRUE if the buffers were correctly allocated, FALSE otherwise.
 */
struct cpu_data;
extern boolean_t kpc_register_cpu(struct cpu_data *cpu_data);
extern void kpc_unregister_cpu(struct cpu_data *cpu_data);

extern bool kpc_supported;

/* bootstrap */
extern void kpc_init(void);

/* common initialization */
extern void kpc_common_init(void);

/* Architecture-specific initialization */
extern void kpc_arch_init(void);

/* Thread counting initialization */
extern void kpc_thread_init(void);

/* Get the bitmask of available classes */
extern uint32_t kpc_get_classes(void);

/* Get the bitmask of currently running counter classes */
extern uint32_t kpc_get_running(void);

/* Get the version of KPC that's being run */
extern int kpc_get_pmu_version(void);

/* Set the bitmask of currently running counter classes. Specify
 * classes = 0 to stop the counters.
 */
extern int kpc_set_running(uint32_t classes);

/* Read CPU counters */
extern int kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                                int *curcpu, uint64_t *buf);

/* Read shadow counters */
extern int kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                                   int *curcpu, uint64_t *buf);

/* Read the current thread's counter accumulations */
extern int kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf);

/* Given a set of classes, report how many counters and config registers there are */
extern uint32_t kpc_get_counter_count(uint32_t classes);
extern uint32_t kpc_get_config_count(uint32_t classes);

/* enable/disable thread counting */
extern uint32_t kpc_get_thread_counting(void);
extern int kpc_set_thread_counting(uint32_t classes);

/* get and set config registers */
extern int kpc_get_config(uint32_t classes, kpc_config_t *current_config);
extern int kpc_set_config(uint32_t classes, kpc_config_t *new_config);

/* get and set PMI period */
extern int kpc_get_period(uint32_t classes, uint64_t *period);
extern int kpc_set_period(uint32_t classes, uint64_t *period);

/* get and set kperf actionid */
extern int kpc_get_actionid(uint32_t classes, uint32_t *actionid);
extern int kpc_set_actionid(uint32_t classes, uint32_t *actionid);

/* hooks on thread create and delete */
extern void kpc_thread_create(thread_t thread);
extern void kpc_thread_destroy(thread_t thread);

/* allocate a buffer big enough for all counters */
extern uint64_t *kpc_counterbuf_alloc(void);
extern void kpc_counterbuf_free(uint64_t*);
extern uint32_t kpc_get_counterbuf_size(void);

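/*
 * Example: an assumed end-to-end sequence using the interfaces above --
 * allocate a buffer large enough for every counter, start the desired
 * classes, read this CPU's counters, then stop and free. Error handling is
 * mostly elided and a zero return is assumed to indicate success; this is a
 * sketch, not a prescribed sequence:
 *
 * @code
 * uint32_t classes = KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
 * uint64_t *buf = kpc_counterbuf_alloc();
 *
 * if (buf != NULL) {
 *         int curcpu = 0;
 *         if (kpc_set_running(classes) == 0) {
 *                 kpc_get_cpu_counters(FALSE, classes, &curcpu, buf);
 *                 kpc_set_running(0); // classes = 0 stops the counters
 *         }
 *         kpc_counterbuf_free(buf);
 * }
 * @endcode
 */
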
/* whether we're currently accounting into threads */
extern int kpc_threads_counting;

/* AST callback for KPC */
extern void kpc_thread_ast_handler(thread_t thread);

#ifdef MACH_KERNEL_PRIVATE

/* context switch callback for KPC */

extern boolean_t kpc_off_cpu_active;

extern void kpc_off_cpu_internal(thread_t thread);
extern void kpc_off_cpu_update(void);

static inline void
kpc_off_cpu(thread_t thread)
{
	if (__improbable(kpc_off_cpu_active)) {
		kpc_off_cpu_internal(thread);
	}
}

#endif /* defined(MACH_KERNEL_PRIVATE) */

/* acquire/release the counters used by the Power Manager */
extern int kpc_force_all_ctrs(task_t task, int val);
extern int kpc_get_force_all_ctrs(void);

/* arch-specific routine for acquiring/releasing the counters used by the Power Manager */
extern int kpc_force_all_ctrs_arch(task_t task, int val);

extern int kpc_set_sw_inc(uint32_t mask);

/* disable/enable the whitelist of allowed events */
extern int kpc_get_whitelist_disabled(void);
extern int kpc_disable_whitelist(int val);

/*
 * Register the Power Manager as a PMCs user.
 *
 * This function is deprecated and kept for old Power Managers; new Power
 * Managers should use @em kpc_reserve_pm_counters() instead. This function
 * simply calls @em kpc_reserve_pm_counters() with the following arguments:
 * - handler = handler
 * - pmc_mask = 0x83
 * - custom_config = TRUE
 *
 * See @em kpc_reserve_pm_counters() for more details about the return value.
 */
extern boolean_t kpc_register_pm_handler(void (*handler)(boolean_t));

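/*
 * Sketch of the equivalence described above, using the argument order of the
 * kpc_reserve_pm_counters() declaration below (illustrative only, not the
 * actual implementation):
 *
 * @code
 * boolean_t
 * kpc_register_pm_handler(void (*handler)(boolean_t))
 * {
 *         return kpc_reserve_pm_counters(0x83, handler, TRUE);
 * }
 * @endcode
 */
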
/*
 * Register the Power Manager as a PMCs user.
 *
 * @param handler
 * Notification callback to use when PMCs are acquired/released by a task.
 * Power management must acknowledge the change using kpc_pm_acknowledge.
 *
 * @param pmc_mask
 * Bitmask of the configurable PMCs used by the Power Manager. The number of
 * bits set must be less than or equal to the number of configurable counters
 * available on the SoC.
 *
 * @param custom_config
 * If custom_config is TRUE, the legacy sharing mode is enabled; otherwise the
 * modern sharing mode is enabled. These modes are explained in more detail in
 * the kperf documentation.
 *
 * @return
 * FALSE if a task has acquired all the PMCs; otherwise TRUE, and the Power
 * Manager can start using the reserved PMCs.
 */
extern boolean_t kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                                         boolean_t custom_config);

/*
 * Unregister the Power Manager as a PMCs user, and release the previously
 * reserved counters.
 */
extern void kpc_release_pm_counters(void);

/*
 * Acknowledge the callback that PMCs are available to power management.
 *
 * @param available_to_pm Whether the counters were made available to power
 * management in the callback. Pass in whatever was passed into the handler
 * function. After this point, power management is able to use POWER_CLASS
 * counters.
 */
extern void kpc_pm_acknowledge(boolean_t available_to_pm);

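/*
 * Sketch of the assumed Power Manager flow: reserve a mask of configurable
 * PMCs with a notification handler, acknowledge ownership changes from that
 * handler, and release the counters when done. The handler name and the mask
 * value are hypothetical:
 *
 * @code
 * static void
 * pm_pmc_handler(boolean_t available_to_pm)
 * {
 *         // reconfigure or quiesce the PM's counters as needed, then
 *         // acknowledge so the ownership change can complete
 *         kpc_pm_acknowledge(available_to_pm);
 * }
 *
 * // at startup: two configurable PMCs, modern sharing mode
 * if (!kpc_reserve_pm_counters(0x3, pm_pmc_handler, FALSE)) {
 *         // a task currently holds all the PMCs
 * }
 *
 * // at shutdown:
 * kpc_release_pm_counters();
 * @endcode
 */
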
/*
 * Is the PMU used by both the power manager and userspace?
 *
 * This is true when the power manager has been registered. It disables certain
 * counter configurations (like RAWPMU) that are incompatible with sharing
 * counters.
 */
extern boolean_t kpc_multiple_clients(void);

/*
 * Is kpc controlling the fixed counters?
 *
 * This returns false when the power manager has requested custom configuration
 * control.
 */
extern boolean_t kpc_controls_fixed_counters(void);

/*
 * Is kpc controlling a specific PMC?
 */
extern boolean_t kpc_controls_counter(uint32_t ctr);

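/*
 * Sketch: code that programs counters can use these queries to skip PMCs it
 * does not own. The loop below is hypothetical and assumes counter indices
 * cover the fixed counters first, then the configurable ones:
 *
 * @code
 * uint32_t total = kpc_fixed_count() + kpc_configurable_count();
 * for (uint32_t ctr = 0; ctr < total; ctr++) {
 *         if (!kpc_controls_counter(ctr)) {
 *                 continue; // counter is owned by the power manager
 *         }
 *         // safe for kpc to program this counter
 * }
 * @endcode
 */
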
extern void kpc_idle(void);
extern void kpc_idle_exit(void);


/*
 * KPC PRIVATE
 */

extern uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* handler for mp operations */
struct kpc_config_remote {
	uint32_t classes;
	kpc_config_t *configv;
	uint64_t pmc_mask;
};

/* handler for mp operations */
struct kpc_running_remote {
	uint32_t classes;         /* classes to run */
	uint64_t cfg_target_mask; /* configurable counters selected */
	uint64_t cfg_state_mask;  /* configurable counters new state */
};

/* handler for mp operations */
struct kpc_get_counters_remote {
	uint32_t classes;
	uint32_t nb_counters;
	uint32_t buf_stride;
	uint64_t *buf;
};

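/*
 * Sketch: these structs bundle the arguments handed to the arch layer for
 * cross-CPU operations. A hypothetical caller of kpc_set_config_arch() might
 * fill one in like this (new_config is assumed to be a caller-supplied
 * kpc_config_t array):
 *
 * @code
 * struct kpc_config_remote mp_config = {
 *         .classes  = KPC_CLASS_CONFIGURABLE_MASK,
 *         .configv  = new_config,
 *         .pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK),
 * };
 * int ret = kpc_set_config_arch(&mp_config);
 * @endcode
 */
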
extern int kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf);
extern int kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf);
extern int kpc_get_fixed_counters(uint64_t *counterv);
extern int kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask);
extern boolean_t kpc_is_running_fixed(void);
extern boolean_t kpc_is_running_configurable(uint64_t pmc_mask);
extern uint32_t kpc_fixed_count(void);
extern uint32_t kpc_configurable_count(void);
extern uint32_t kpc_fixed_config_count(void);
extern uint32_t kpc_configurable_config_count(uint64_t pmc_mask);
extern uint32_t kpc_rawpmu_config_count(void);
extern int kpc_get_fixed_config(kpc_config_t *configv);
extern int kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask);
extern int kpc_get_rawpmu_config(kpc_config_t *configv);
extern uint64_t kpc_fixed_max(void);
extern uint64_t kpc_configurable_max(void);
extern int kpc_set_config_arch(struct kpc_config_remote *mp_config);
extern int kpc_set_period_arch(struct kpc_config_remote *mp_config);
extern void kpc_sample_kperf(uint32_t actionid);
extern int kpc_set_running_arch(struct kpc_running_remote *mp_config);


/*
 * Helpers
 */

/* count the number of bits set */
extern uint8_t kpc_popcount(uint64_t value);

/* for a set of classes, retrieve the configurable PMCs mask */
extern uint64_t kpc_get_configurable_pmc_mask(uint32_t classes);

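/*
 * Sketch: a typical pairing of the two helpers above -- derive the
 * configurable PMC mask for a set of classes, then count how many counters it
 * covers (illustrative only):
 *
 * @code
 * uint64_t mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
 * uint8_t ncfg = kpc_popcount(mask);
 * @endcode
 */
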
/* Interface for kexts to publish a kpc interface */
struct kpc_driver {
	uint32_t (*get_classes)(void);
	uint32_t (*get_running)(void);
	int (*set_running)(uint32_t classes);
	int (*get_cpu_counters)(boolean_t all_cpus, uint32_t classes,
	    int *curcpu, uint64_t *buf);
	int (*get_curthread_counters)(uint32_t *inoutcount, uint64_t *buf);
	uint32_t (*get_counter_count)(uint32_t classes);
	uint32_t (*get_config_count)(uint32_t classes);
	int (*get_config)(uint32_t classes, kpc_config_t *current_config);
	int (*set_config)(uint32_t classes, kpc_config_t *new_config);
	int (*get_period)(uint32_t classes, uint64_t *period);
	int (*set_period)(uint32_t classes, uint64_t *period);
};

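/*
 * Sketch: a kext publishing its own kpc interface would fill in a kpc_driver
 * with its callbacks. The driver and function names below are hypothetical;
 * members not listed are left NULL by the designated initializer:
 *
 * @code
 * static struct kpc_driver my_pmu_driver = {
 *         .get_classes      = my_pmu_get_classes,
 *         .get_running      = my_pmu_get_running,
 *         .set_running      = my_pmu_set_running,
 *         .get_cpu_counters = my_pmu_get_cpu_counters,
 *         .get_config       = my_pmu_get_config,
 *         .set_config       = my_pmu_set_config,
 * };
 * @endcode
 */
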
__END_DECLS

#endif /* !defined(KERN_KPC_H) */