/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/thread.h>
#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \
                         COUNTERBUF_SIZE_PER_CPU)

/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t *kpc_config_lckgrp = NULL;
static lck_mtx_t kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;
#if MACH_ASSERT
static bool kpc_calling_pm = false;
#endif /* MACH_ASSERT */

boolean_t kpc_context_switch_active = FALSE;

void kpc_common_init(void);
void
kpc_common_init(void)
{
    kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
    kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
    lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
    assert(cpu_data);
    assert(cpu_data->cpu_kpc_buf[0] == NULL);
    assert(cpu_data->cpu_kpc_buf[1] == NULL);
    assert(cpu_data->cpu_kpc_shadow == NULL);
    assert(cpu_data->cpu_kpc_reload == NULL);

    /*
     * Buffers allocated through kpc_counterbuf_alloc() are large enough to
     * store the values of every PMC on every CPU, mimicking the userspace
     * API. That layout is a poor fit for the per-CPU kpc buffers, since:
     *  1. Each per-CPU buffer does not need to be that large.
     *  2. The actual number of CPUs is not known at this point.
     *
     * Since each CPU calls out into kpc as it is registered, the per-CPU
     * memory is allocated here instead.
     */

    if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
        goto error;
    if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
        goto error;
    if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
        goto error;
    if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
        goto error;

    memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
    memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
    memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
    memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

    /* success */
    return TRUE;

error:
    kpc_unregister_cpu(cpu_data);
    return FALSE;
}

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
    assert(cpu_data);
    if (cpu_data->cpu_kpc_buf[0] != NULL) {
        kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
        cpu_data->cpu_kpc_buf[0] = NULL;
    }
    if (cpu_data->cpu_kpc_buf[1] != NULL) {
        kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
        cpu_data->cpu_kpc_buf[1] = NULL;
    }
    if (cpu_data->cpu_kpc_shadow != NULL) {
        kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
        cpu_data->cpu_kpc_shadow = NULL;
    }
    if (cpu_data->cpu_kpc_reload != NULL) {
        kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);
        cpu_data->cpu_kpc_reload = NULL;
    }
}

static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
    assert(task);

    task_lock(task);
    if (state)
        task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
    else
        task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
    task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
    assert(task);
    return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

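/*
 * Grab (val != 0) or release (val == 0) all configurable counters on behalf
 * of `task`, notifying the registered power-manager handler so it can give
 * up, or start reusing, its reserved PMCs.
 */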
int
kpc_force_all_ctrs(task_t task, int val)
{
    boolean_t new_state = val ? TRUE : FALSE;
    boolean_t old_state = kpc_get_force_all_ctrs();

    /*
     * Refuse to do the operation if the counters are already forced by
     * another task.
     */
    if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
        return EACCES;

    /* nothing to do if the state is not changing */
    if (old_state == new_state)
        return 0;

    /* notify the power manager */
    if (kpc_pm_handler) {
#if MACH_ASSERT
        kpc_calling_pm = true;
#endif /* MACH_ASSERT */
        kpc_pm_handler(new_state ? FALSE : TRUE);
#if MACH_ASSERT
        kpc_calling_pm = false;
#endif /* MACH_ASSERT */
    }

    /*
     * This is a force -- ensure that counters are forced, even if power
     * management fails to acknowledge it.
     */
    if (force_all_ctrs != new_state) {
        force_all_ctrs = new_state;
    }

    /* update the task bits */
    kpc_task_set_forced_all_ctrs(task, new_state);

    return 0;
}

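/*
 * Called by the power manager, from inside its kpc_pm_handler callback, to
 * acknowledge that the counters have been handed over to it or taken away.
 */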
void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
    /*
     * Force-all-counters should still be true when the counters are being
     * made available to power management and false when counters are going
     * to be taken away.
     */
    assert(force_all_ctrs == available_to_pm);
    /*
     * Make sure power management isn't playing games with us.
     */
    assert(kpc_calling_pm == true);

    /*
     * Counters being available means no one is forcing all counters.
     */
    force_all_ctrs = available_to_pm ? FALSE : TRUE;
}

int
kpc_get_force_all_ctrs(void)
{
    return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
    return kpc_pm_handler != NULL;
}

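/*
 * kpc owns the fixed counters unless the power manager registered with a
 * custom configuration; force_all_ctrs overrides that and returns them
 * to kpc.
 */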
boolean_t
kpc_controls_fixed_counters(void)
{
    return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
    uint64_t pmc_mask = 0ULL;

    assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

    if (ctr < kpc_fixed_count())
        return kpc_controls_fixed_counters();

    /*
     * By default kpc manages all PMCs, but if the Power Manager registered
     * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
     * However, kpc takes ownership back if a task acquired all PMCs via
     * force_all_ctrs.
     */
    pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
    if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
        return FALSE;

    return TRUE;
}

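/* Return the mask of counter classes that are currently counting. */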
uint32_t
kpc_get_running(void)
{
    uint64_t pmc_mask = 0;
    uint32_t cur_state = 0;

    if (kpc_is_running_fixed())
        cur_state |= KPC_CLASS_FIXED_MASK;

    pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
    if (kpc_is_running_configurable(pmc_mask))
        cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

    pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
    if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
        cur_state |= KPC_CLASS_POWER_MASK;

    return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
    int enabled = 0, offset = 0;
    uint64_t pmc_mask = 0ULL;

    assert(buf);

    enabled = ml_set_interrupts_enabled(FALSE);

    /* grab counters and CPU number as close as possible */
    if (curcpu)
        *curcpu = current_processor()->cpu_id;

    if (classes & KPC_CLASS_FIXED_MASK) {
        kpc_get_fixed_counters(&buf[offset]);
        offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
        kpc_get_configurable_counters(&buf[offset], pmc_mask);
        offset += kpc_popcount(pmc_mask);
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
        kpc_get_configurable_counters(&buf[offset], pmc_mask);
        offset += kpc_popcount(pmc_mask);
    }

    ml_set_interrupts_enabled(enabled);

    return offset;
}

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
    assert(buf);

    /*
     * Unlike reading the current CPU's counters, reading counters from all
     * CPUs is architecture dependent. This lets kpc take advantage of
     * platforms where the PMCs are exposed through memory-mapped registers.
     */
    if (all_cpus)
        return kpc_get_all_cpus_counters(classes, curcpu, buf);
    else
        return kpc_get_curcpu_counters(classes, curcpu, buf);
}

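/*
 * Copy the shadow (last-saved) counter values for the requested classes into
 * buf, either for the current CPU only or for every CPU, with interrupts
 * disabled so the snapshot stays consistent.
 */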
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
    int curcpu_id = current_processor()->cpu_id;
    uint32_t cfg_count = kpc_configurable_count(), offset = 0;
    uint64_t pmc_mask = 0ULL;
    boolean_t enabled;

    assert(buf);

    enabled = ml_set_interrupts_enabled(FALSE);

    curcpu_id = current_processor()->cpu_id;
    if (curcpu)
        *curcpu = curcpu_id;

    for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
        /* filter if the caller did not request all cpus */
        if (!all_cpus && (cpu != curcpu_id))
            continue;

        if (classes & KPC_CLASS_FIXED_MASK) {
            uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
            memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
            offset += count;
        }

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
            pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

            for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
                if ((1ULL << cfg_ctr) & pmc_mask)
                    buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
        }

        if (classes & KPC_CLASS_POWER_MASK) {
            pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

            for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
                if ((1ULL << cfg_ctr) & pmc_mask)
                    buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
        }
    }

    ml_set_interrupts_enabled(enabled);

    return offset;
}

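/* Number of counters covered by the given class mask. */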
uint32_t
kpc_get_counter_count(uint32_t classes)
{
    uint32_t count = 0;

    if (classes & KPC_CLASS_FIXED_MASK)
        count += kpc_fixed_count();

    if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
        uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
        uint32_t pmc_cnt = kpc_popcount(pmc_msk);
        count += pmc_cnt;
    }

    return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
    uint32_t count = 0;

    if (classes & KPC_CLASS_FIXED_MASK)
        count += kpc_fixed_config_count();

    if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
        uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
        count += kpc_configurable_config_count(pmc_mask);
    }

    if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
        count += kpc_rawpmu_config_count();

    return count;
}

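/*
 * Read back the current configuration words for the requested classes.
 * RAWPMU state can only be read when kpc is the sole client (no power
 * manager registered).
 */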
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
    uint32_t count = 0;

    assert(current_config);

    if (classes & KPC_CLASS_FIXED_MASK) {
        kpc_get_fixed_config(&current_config[count]);
        count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
        kpc_get_configurable_config(&current_config[count], pmc_mask);
        count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
        kpc_get_configurable_config(&current_config[count], pmc_mask);
        count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
    }

    if (classes & KPC_CLASS_RAWPMU_MASK) {
        // Client shouldn't ask for config words that aren't available.
        // Most likely, they'd misinterpret the returned buffer if we
        // allowed this.
        if (kpc_multiple_clients()) {
            return EPERM;
        }
        kpc_get_rawpmu_config(&current_config[count]);
        count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
    }

    return 0;
}

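/*
 * Program the configuration words for the requested classes. A single call
 * may not mix the configurable and power classes, and RAWPMU cannot be
 * configured while counters are shared with the power manager.
 */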
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
    int ret = 0;
    struct kpc_config_remote mp_config = {
        .classes = classes, .configv = configv,
        .pmc_mask = kpc_get_configurable_pmc_mask(classes)
    };

    assert(configv);

    /* don't allow RAWPMU configuration when sharing counters */
    if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
        return EPERM;
    }

    /* no client may modify both classes at the same time */
    if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
        (classes & (KPC_CLASS_POWER_MASK))) {
        return EPERM;
    }

    lck_mtx_lock(&kpc_config_lock);

    /* translate the power class for the machine layer */
    if (classes & KPC_CLASS_POWER_MASK)
        mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

    ret = kpc_set_config_arch(&mp_config);

    lck_mtx_unlock(&kpc_config_lock);

    return ret;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
    uint64_t *buf = NULL;

    buf = kalloc(COUNTERBUF_SIZE);
    if (buf) {
        bzero(buf, COUNTERBUF_SIZE);
    }

    return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
    if (buf) {
        kfree(buf, COUNTERBUF_SIZE);
    }
}

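/*
 * Called on the PMI handler path: ask kperf to run the given action and
 * sample the current thread.
 */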
void
kpc_sample_kperf(uint32_t actionid)
{
    struct kperf_sample sbuf;
    struct kperf_context ctx;

    BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START);

    ctx.cur_pid = 0;
    ctx.cur_thread = current_thread();
    ctx.cur_pid = task_pid(current_task());

    ctx.trigger_type = TRIGGER_TYPE_PMI;
    ctx.trigger_id = 0;

    int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

    BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

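/*
 * Set the sampling period for each counter in the requested classes
 * (stored internally as reload values; see kpc_get_period()).
 */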
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
    struct kpc_config_remote mp_config = {
        .classes = classes, .configv = val,
        .pmc_mask = kpc_get_configurable_pmc_mask(classes)
    };

    assert(val);

    /* no client may modify both classes at the same time */
    if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
        (classes & (KPC_CLASS_POWER_MASK))) {
        return EPERM;
    }

    lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
    if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
        lck_mtx_unlock(&kpc_config_lock);
        return EPERM;
    }
#else
    if (classes & KPC_CLASS_FIXED_MASK) {
        lck_mtx_unlock(&kpc_config_lock);
        return EINVAL;
    }
#endif

    /* translate the power class for the machine layer */
    if (classes & KPC_CLASS_POWER_MASK)
        mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

    kprintf("kpc: setting period for classes 0x%x\n", classes);
    kpc_set_period_arch(&mp_config);

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

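/*
 * Read back the current sampling periods; each period is reconstructed as
 * the counter's maximum value minus its reload value.
 */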
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
    uint32_t count = 0;
    uint64_t pmc_mask = 0ULL;

    assert(val);

    lck_mtx_lock(&kpc_config_lock);

    if (classes & KPC_CLASS_FIXED_MASK) {
        /* convert reload values to periods */
        count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
        for (uint32_t i = 0; i < count; ++i)
            *val++ = kpc_fixed_max() - FIXED_RELOAD(i);
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

        /* convert reload values to periods */
        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i)
            if ((1ULL << i) & pmc_mask)
                *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

        /* convert reload values to periods */
        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i)
            if ((1ULL << i) & pmc_mask)
                *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
    }

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

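/*
 * Associate a kperf action id with each counter in the requested classes;
 * kpc_get_actionid() below is the symmetric read.
 */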
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
    uint32_t count = 0;
    uint64_t pmc_mask = 0ULL;

    assert(val);

    /* NOTE: what happens if a pmi occurs while actionids are being
     * set is undefined. */
    lck_mtx_lock(&kpc_config_lock);

    if (classes & KPC_CLASS_FIXED_MASK) {
        count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
        memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
        val += count;
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i)
            if ((1ULL << i) & pmc_mask)
                CONFIGURABLE_ACTIONID(i) = *val++;
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i)
            if ((1ULL << i) & pmc_mask)
                CONFIGURABLE_ACTIONID(i) = *val++;
    }

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
    uint32_t count = 0;
    uint64_t pmc_mask = 0ULL;

    assert(val);

    lck_mtx_lock(&kpc_config_lock);

    if (classes & KPC_CLASS_FIXED_MASK) {
        count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
        memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
        val += count;
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i)
            if ((1ULL << i) & pmc_mask)
                *val++ = CONFIGURABLE_ACTIONID(i);
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i)
            if ((1ULL << i) & pmc_mask)
                *val++ = CONFIGURABLE_ACTIONID(i);
    }

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

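/*
 * Start counting the requested classes: the target mask covers every
 * available configurable PMC while the state mask carries only the PMCs of
 * the requested classes, so counters outside those classes are stopped.
 */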
int
kpc_set_running(uint32_t classes)
{
    uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
    struct kpc_running_remote mp_config = {
        .classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
    };

    /* target all available PMCs */
    mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

    /* translate the power class for the machine layer */
    if (classes & KPC_CLASS_POWER_MASK)
        mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

    /* generate the state of each configurable PMC */
    mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

    return kpc_set_running_arch(&mp_config);
}

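/*
 * Convenience wrapper used by the power manager: reserve the default PMC set
 * (mask 0x38, i.e. configurable counters 3-5) with a custom configuration.
 */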
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
    return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

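/*
 * Reserve the configurable PMCs in pmc_mask for the power manager and record
 * its handler. Returns FALSE when a task currently has all counters forced;
 * the reservation is still recorded and the handler is notified once the
 * counters become available again.
 */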
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
    uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
    uint64_t req_mask = 0ULL;

    /* pre-condition */
    assert(handler != NULL);
    assert(kpc_pm_handler == NULL);

    /* check number of counters requested */
    req_mask = (pmc_mask & all_mask);
    assert(kpc_popcount(req_mask) <= kpc_configurable_count());

    /* save the power manager states */
    kpc_pm_has_custom_config = custom_config;
    kpc_pm_pmc_mask = req_mask;
    kpc_pm_handler = handler;

    printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
           req_mask, custom_config);

    /* post-condition */
    {
        uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
        uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
        assert((cfg_count + pwr_count) == kpc_configurable_count());
    }

    return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
    /* pre-condition */
    assert(kpc_pm_handler != NULL);

    /* release the counters */
    kpc_pm_has_custom_config = FALSE;
    kpc_pm_pmc_mask = 0ULL;
    kpc_pm_handler = NULL;

    printf("kpc: pm released counters\n");

    /* post-condition */
    assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
    return __builtin_popcountll(value);
}

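/*
 * Compute the bitmask of configurable PMCs belonging to the given classes,
 * honoring the power manager's reservation and any force_all_ctrs override.
 */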
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
    uint32_t configurable_count = kpc_configurable_count();
    uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

    /* no configurable classes requested, or no configurable counters at all */
    if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
        (configurable_count == 0)) {
        goto exit;
    }

    assert(configurable_count < 64);
    all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        if (force_all_ctrs == TRUE)
            cfg_mask |= all_cfg_pmcs_mask;
        else
            cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
    }

    /*
     * The power class exists iff:
     *  - no task has acquired all PMCs via force_all_ctrs, and
     *  - the power manager has registered and relies on kpc to program
     *    its reserved PMCs (custom_config == FALSE).
     */
    if ((force_all_ctrs == FALSE) &&
        (kpc_pm_handler != NULL) &&
        (kpc_pm_has_custom_config == FALSE) &&
        (classes & KPC_CLASS_POWER_MASK)) {
        pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
    }

exit:
    /* post-conditions */
    assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0);
    assert(kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
    assert((cfg_mask & pwr_mask) == 0ULL);

    return cfg_mask | pwr_mask;
}