/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/thread.h>
#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
	                         COUNTERBUF_SIZE_PER_CPU)

/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;
#if MACH_ASSERT
static bool kpc_calling_pm = false;
#endif /* MACH_ASSERT */

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;

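/*
 * Per-CPU buffer management: each registered CPU owns two counter buffers,
 * a shadow buffer, and a reload buffer, each sized to hold KPC_MAX_COUNTERS
 * 64-bit values.
 */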
static uint64_t *
kpc_percpu_alloc(void)
{
	return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE_PER_CPU,
	    Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
}

static void
kpc_percpu_free(uint64_t *buf)
{
	if (buf) {
		kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE_PER_CPU);
	}
}

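/*
 * Allocate the per-CPU kpc buffers when a CPU registers itself; on failure,
 * any partial allocations are released via kpc_unregister_cpu().
 */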
boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * It does not suit the per-CPU kpc buffers well, since:
	 *	1. Buffers don't need to be this large.
	 *	2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs are asked to call out into kpc when being registered, so we
	 * allocate the memory here.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
		goto error;
	}

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}

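/* Free and clear any per-CPU kpc buffers owned by this CPU. */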
void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}
	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}
	if (cpu_data->cpu_kpc_shadow != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_shadow);
		cpu_data->cpu_kpc_shadow = NULL;
	}
	if (cpu_data->cpu_kpc_reload != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_reload);
		cpu_data->cpu_kpc_reload = NULL;
	}
}


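/*
 * Track, in the task, whether it has forced ownership of all counters
 * (TASK_KPC_FORCED_ALL_CTRS).
 */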
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state) {
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	} else {
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	}
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

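/*
 * Claim (val != 0) or release (val == 0) all counters on behalf of a task.
 * Fails with EACCES if another task currently has them forced. The power
 * manager is notified so it can relinquish or reclaim its counters.
 */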
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
		return EACCES;
	}

	/* nothing to do if the state is not changing */
	if (old_state == new_state) {
		return 0;
	}

	/* notify the power manager */
	if (kpc_pm_handler) {
#if MACH_ASSERT
		kpc_calling_pm = true;
#endif /* MACH_ASSERT */
		kpc_pm_handler( new_state ? FALSE : TRUE );
#if MACH_ASSERT
		kpc_calling_pm = false;
#endif /* MACH_ASSERT */
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}

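/*
 * Called back by the power manager, from within its registered handler, to
 * acknowledge whether the counters are now available to it.
 */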
void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken away.
	 */
	assert(force_all_ctrs == available_to_pm);
	/*
	 * Make sure power management isn't playing games with us.
	 */
	assert(kpc_calling_pm == true);

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}

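/*
 * Ownership queries: whether a power manager is registered and which
 * counters kpc currently controls.
 */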
int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count()) {
		return kpc_controls_fixed_counters();
	}

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task has acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
		return FALSE;
	}

	return TRUE;
}

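/* Return the mask of counter classes that are currently counting. */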
uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed()) {
		cur_state |= KPC_CLASS_FIXED_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_POWER_MASK;
	}

	return cur_state;
}

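/*
 * Read this CPU's counters into buf, packed in class order (fixed, then
 * configurable, then power). Returns the number of values written.
 */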
/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close together as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped registers are supported.
	 */
	if (all_cpus) {
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	} else {
		return kpc_get_curcpu_counters(classes, curcpu, buf);
	}
}

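/*
 * Copy the saved (shadow) counter values for the requested classes, either
 * for the current CPU or for every CPU, without touching the hardware.
 */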
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	int curcpu_id = cpu_number();
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = cpu_number();
	if (curcpu) {
		*curcpu = curcpu_id;
	}

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id)) {
			continue;
		}

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

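/* Number of counters in the requested classes. */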
uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

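/* Number of configuration words used by the requested classes. */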
uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_config_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients()) {
		count += kpc_rawpmu_config_count();
	}

	return count;
}

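/* Copy the current configuration words for the requested classes into current_config. */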
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

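/*
 * Validate and apply new configuration words for the requested classes via
 * the machine-dependent layer.
 */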
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

uint32_t
kpc_get_counterbuf_size(void)
{
	return COUNTERBUF_SIZE;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE,
	    Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE);
	}
}

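/*
 * PMI path: log the overflowing counter and its configuration to the trace
 * buffer, then take a kperf sample for the configured action, attributed to
 * the current thread and task.
 */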
void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
	struct kperf_sample sbuf;

	uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}


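/*
 * Set the sampling periods for the requested classes. Fixed-counter periods
 * are only supported when FIXED_COUNTER_SHADOW is defined.
 */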
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

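/* Report the current sampling periods, derived from the counter reload values. */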
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i) {
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

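/* Associate a kperf action id with each counter in the requested classes. */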
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: the behavior is undefined if a PMI occurs while action ids
	 * are being set. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

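/* Read back the kperf action ids assigned to the counters in the requested classes. */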
int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

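/*
 * Start counting for the requested classes; configurable counters outside
 * the requested classes are stopped.
 */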
int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}

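/*
 * Convenience wrapper: reserve the power manager's default PMC set
 * (mask 0x38, i.e. PMCs 3-5) with a custom configuration.
 */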
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

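/*
 * Reserve a subset of the configurable PMCs for the power manager. Returns
 * TRUE if the counters are available to the power manager immediately,
 * FALSE if a task has currently forced ownership of all counters.
 */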
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

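/* Return the power manager's reserved PMCs to kpc. */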
void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
	return (uint8_t)__builtin_popcountll(value);
}

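/*
 * Compute the mask of configurable PMCs covered by the requested classes,
 * accounting for the power manager's reservation and force_all_ctrs.
 */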
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable classes or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE) {
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *	- No task has acquired all PMCs
	 *	- The PM is registered and uses kpc to interact with the PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}