/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/thread.h>
#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \
	COUNTERBUF_SIZE_PER_CPU)

/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t *kpc_config_lckgrp = NULL;
static lck_mtx_t kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;
#if MACH_ASSERT
static bool kpc_calling_pm = false;
#endif /* MACH_ASSERT */

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;

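/*
 * Set up the mutex that serializes changes to the kpc configuration state.
 */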
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * It does not fit the per-CPU kpc buffers well, since:
	 *	1. Buffers don't need to be this large.
	 *	2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs are asked to call out into kpc when they are registered, so we
	 * allocate the memory here.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) {
		goto error;
	}

	memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}
	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}
	if (cpu_data->cpu_kpc_shadow != NULL) {
		kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_shadow = NULL;
	}
	if (cpu_data->cpu_kpc_reload != NULL) {
		kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_reload = NULL;
	}
}


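/*
 * Per-task bookkeeping for force-all-counters: record in the task whether it
 * currently holds exclusive use of all counters.
 */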
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state) {
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	} else {
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	}
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

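/*
 * Acquire (val != 0) or release exclusive use of all counters on behalf of
 * `task`, notifying the power manager's handler (if one is registered) that
 * the counters are being taken away from it or handed back.
 */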
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
		return EACCES;
	}

	/* nothing to do if the state is not changing */
	if (old_state == new_state) {
		return 0;
	}

	/* notify the power manager */
	if (kpc_pm_handler) {
#if MACH_ASSERT
		kpc_calling_pm = true;
#endif /* MACH_ASSERT */
		kpc_pm_handler( new_state ? FALSE : TRUE );
#if MACH_ASSERT
		kpc_calling_pm = false;
#endif /* MACH_ASSERT */
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}

void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken away.
	 */
	assert(force_all_ctrs == available_to_pm);
	/*
	 * Make sure power management isn't playing games with us.
	 */
	assert(kpc_calling_pm == true);

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}

int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count()) {
		return kpc_controls_fixed_counters();
	}

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task has acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
		return FALSE;
	}

	return TRUE;
}

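/*
 * Return a mask of the counter classes that currently have counters running.
 */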
uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed()) {
		cur_state |= KPC_CLASS_FIXED_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_POWER_MASK;
	}

	return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = current_processor()->cpu_id;
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped registers are supported.
	 */
	if (all_cpus) {
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	} else {
		return kpc_get_curcpu_counters(classes, curcpu, buf);
	}
}

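/*
 * Gather the per-CPU shadow values (FIXED_SHADOW_CPU/CONFIGURABLE_SHADOW_CPU)
 * for the requested classes, either for the current CPU only or for every CPU,
 * and return the number of values written into buf.
 */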
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu) {
		*curcpu = curcpu_id;
	}

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id)) {
			continue;
		}

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

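/*
 * Return the number of counters in the requested classes, honoring the
 * current split of configurable PMCs between kpc and the power manager.
 */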
uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

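/*
 * Return the number of configuration words needed for the requested classes.
 */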
uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_config_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients()) {
		count += kpc_rawpmu_config_count();
	}

	return count;
}

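/*
 * Copy the current configuration words for the requested classes into
 * current_config.
 */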
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

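/*
 * Program new configuration words for the requested classes.  RAWPMU
 * configuration is refused while the power manager shares the counters, and a
 * single call may not target both the configurable and power classes.
 */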
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

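/*
 * Return the size, in bytes, of the buffer returned by kpc_counterbuf_alloc().
 */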
uint32_t
kpc_get_counterbuf_size(void)
{
	return COUNTERBUF_SIZE;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf = NULL;

	buf = kalloc_tag(COUNTERBUF_SIZE, VM_KERN_MEMORY_DIAG);
	if (buf) {
		bzero(buf, COUNTERBUF_SIZE);
	}

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kfree(buf, COUNTERBUF_SIZE);
	}
}

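/*
 * Illustrative use of the buffer helpers above -- a sketch, not code from
 * this file; the caller shown here is hypothetical:
 *
 *	uint64_t *buf = kpc_counterbuf_alloc();
 *	if (buf != NULL) {
 *		int curcpu;
 *		int count = kpc_get_cpu_counters(TRUE,
 *		    KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK,
 *		    &curcpu, buf);
 *		// consume `count` values from buf ...
 *		kpc_counterbuf_free(buf);
 *	}
 */

/*
 * Take a kperf sample on behalf of a PMI, attributing it to the current
 * thread and task and deferring user-space work with SAMPLE_FLAG_PEND_USER.
 */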
void
kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}


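/*
 * Set the PMI period for the counters in the requested classes.  Fixed
 * counters only support periods when FIXED_COUNTER_SHADOW is defined.
 */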
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
# else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

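/*
 * Read back the current periods, converting the stored reload values
 * (period = max - reload) for each counter in the requested classes.
 */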
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i) {
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

669 | int |
670 | kpc_set_actionid(uint32_t classes, uint32_t *val) | |
39236c6e | 671 | { |
3e170ce0 A |
672 | uint32_t count = 0; |
673 | uint64_t pmc_mask = 0ULL; | |
674 | ||
675 | assert(val); | |
39236c6e A |
676 | |
677 | /* NOTE: what happens if a pmi occurs while actionids are being | |
678 | * set is undefined. */ | |
679 | lck_mtx_lock(&kpc_config_lock); | |
680 | ||
681 | if (classes & KPC_CLASS_FIXED_MASK) { | |
682 | count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK); | |
0a7de745 | 683 | memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t)); |
3e170ce0 | 684 | val += count; |
39236c6e A |
685 | } |
686 | ||
687 | if (classes & KPC_CLASS_CONFIGURABLE_MASK) { | |
3e170ce0 | 688 | pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK); |
39236c6e | 689 | |
3e170ce0 | 690 | count = kpc_configurable_count(); |
0a7de745 A |
691 | for (uint32_t i = 0; i < count; ++i) { |
692 | if ((1ULL << i) & pmc_mask) { | |
3e170ce0 | 693 | CONFIGURABLE_ACTIONID(i) = *val++; |
0a7de745 A |
694 | } |
695 | } | |
3e170ce0 A |
696 | } |
697 | ||
698 | if (classes & KPC_CLASS_POWER_MASK) { | |
699 | pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK); | |
700 | ||
701 | count = kpc_configurable_count(); | |
0a7de745 A |
702 | for (uint32_t i = 0; i < count; ++i) { |
703 | if ((1ULL << i) & pmc_mask) { | |
3e170ce0 | 704 | CONFIGURABLE_ACTIONID(i) = *val++; |
0a7de745 A |
705 | } |
706 | } | |
39236c6e A |
707 | } |
708 | ||
709 | lck_mtx_unlock(&kpc_config_lock); | |
710 | ||
711 | return 0; | |
712 | } | |
713 | ||
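/*
 * Read back the action IDs currently associated with the counters in the
 * requested classes.
 */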
int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

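/*
 * Set which counter classes are running: PMCs in the requested classes are
 * started and the remaining configurable PMCs are stopped.  The power class is
 * translated into configurable PMCs for the machine layer.
 */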
int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}

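/*
 * Power-manager registration.  kpc_register_pm_handler() is a convenience
 * wrapper that reserves PMC bits 3-5 (mask 0x38) with a custom configuration;
 * kpc_reserve_pm_counters() records the handler and the reserved PMC mask and
 * returns FALSE if a task is currently forcing all counters.
 */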
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
	return __builtin_popcountll(value);
}

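/*
 * Compute which configurable PMCs belong to the requested classes, given the
 * power manager's reservation and the force-all-counters state.
 */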
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable class requested or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE) {
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *	- No task has acquired all PMCs
	 *	- The PM has registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}