/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>

#include <kperf/buffer.h>
#include <kern/thread.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
                                 COUNTERBUF_SIZE_PER_CPU)
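
/*
 * Sizing note (illustrative numbers, not from the original source): a full
 * counter buffer covers every possible counter on every logical CPU. For
 * example, if KPC_MAX_COUNTERS were 32 on a machine with 8 logical CPUs,
 * COUNTERBUF_SIZE would be 32 * 8 bytes * 8 CPUs = 2 KiB, while each per-CPU
 * buffer only needs COUNTERBUF_SIZE_PER_CPU = 256 bytes.
 */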

static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t      *kpc_config_lckgrp = NULL;
static lck_mtx_t       kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;

boolean_t kpc_context_switch_active = FALSE;

void kpc_common_init(void);
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * This does not suit the per-CPU kpc buffers well, since:
	 * 	1. Buffers don't need to be this large.
	 * 	2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs call out into kpc when they are registered, so we allocate the
	 * memory here.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;

	memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}
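
/*
 * kpc_register_cpu()'s error path relies on kpc_unregister_cpu() below: each
 * per-CPU buffer is NULL-checked and freed individually, so a partial
 * allocation failure is cleaned up safely.
 */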

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);

	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}

	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}

	if (cpu_data->cpu_kpc_shadow != NULL) {
		kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_shadow = NULL;
	}

	if (cpu_data->cpu_kpc_reload != NULL) {
		kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_reload = NULL;
	}
}

static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* notify the power manager */
	if (kpc_pm_handler)
		kpc_pm_handler( new_state ? FALSE : TRUE );

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, val);

	/* update the internal state */
	force_all_ctrs = val;

	return 0;
}
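
/*
 * kpc_force_all_ctrs() notifies the power manager with the inverse of the new
 * state: forcing ownership calls kpc_pm_handler(FALSE), releasing it calls
 * kpc_pm_handler(TRUE) (presumably signalling whether the PM's reserved PMCs
 * are available to it again).
 */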

int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count())
		return kpc_controls_fixed_counters();

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
		return FALSE;

	return TRUE;
}
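
/*
 * Worked example (illustrative numbers): with 4 fixed counters, asking about
 * counter 6 checks configurable PMC bit (1ULL << 2). If the power manager
 * reserved that PMC with a custom config and no task has forced all counters,
 * kpc_controls_counter(6) returns FALSE; otherwise kpc still owns it.
 */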

uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed())
		cur_state |= KPC_CLASS_FIXED_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_POWER_MASK;

	return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
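
/*
 * kpc_get_curcpu_counters() returns the number of uint64_t entries written
 * into buf. Interrupts are disabled for the duration so the counter values
 * and the reported CPU id stay consistent with each other.
 */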

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped registers are supported.
	 */
	if (all_cpus)
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	else
		return kpc_get_curcpu_counters(classes, curcpu, buf);
}
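
/*
 * Callers typically hand in a buffer from kpc_counterbuf_alloc(), which is
 * sized for every counter on every logical CPU and therefore large enough
 * for both the all_cpus and current-CPU paths.
 */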

int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_config_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
		count += kpc_rawpmu_config_count();

	return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		// Clients shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf = NULL;

	buf = kalloc(COUNTERBUF_SIZE);
	if (buf) {
		bzero(buf, COUNTERBUF_SIZE);
	}

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kfree(buf, COUNTERBUF_SIZE);
	}
}

void
kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;
	struct kperf_context ctx;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START);

	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(current_task());

	ctx.trigger_type = TRIGGER_TYPE_PMI;
	ctx.trigger_id = 0;

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}
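
/*
 * kpc_sample_kperf() runs from the PMI path: it fills a kperf context for the
 * interrupted thread and asks kperf to take the sample tied to the given
 * action id. SAMPLE_FLAG_PEND_USER, as the flag name suggests, lets the
 * user-level portion of the sample be pended rather than taken entirely in
 * interrupt context.
 */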

int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i)
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a PMI occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}
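
/*
 * A typical client sequence (illustrative, not mandated by this file) is:
 * configure the event selectors with kpc_set_config(), set sampling periods
 * with kpc_set_period(), attach kperf action ids with kpc_set_actionid(),
 * and finally enable the chosen classes with kpc_set_running().
 */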

boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}
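
/*
 * This registration path reserves the hard-coded mask 0x38, i.e. configurable
 * PMCs 3, 4 and 5 (0b111000), with custom_config = TRUE.
 */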

boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-conditions */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	       req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
	return __builtin_popcountll(value);
}

uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable classes requested or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE)
			cfg_mask |= all_cfg_pmcs_mask;
		else
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
	}

	/*
	 * The power class exists iff:
	 * 	- no task has acquired all PMCs
	 * 	- the PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert( ((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count() );
	assert( (cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}
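
/*
 * Worked example (illustrative numbers): with 6 configurable PMCs and
 * kpc_pm_pmc_mask == 0x38 (PMCs 3-5, no custom config, nothing forced),
 * KPC_CLASS_CONFIGURABLE_MASK maps to 0x07 and KPC_CLASS_POWER_MASK maps to
 * 0x38; the two masks are disjoint, as the post-conditions assert above.
 */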