/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <kperf/buffer.h>
#include <kern/thread.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>
uint32_t kpc_actionid[KPC_MAX_COUNTERS];
#define COUNTERBUF_SIZE_PER_CPU	(KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE		(machine_info.logical_cpu_max * \
				 COUNTERBUF_SIZE_PER_CPU)
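/*
 * Note: COUNTERBUF_SIZE_PER_CPU is the per-CPU buffer size (one 64-bit slot
 * per possible counter); COUNTERBUF_SIZE covers every possible counter on
 * every logical CPU and matches what kpc_counterbuf_alloc() hands out.
 */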
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t      *kpc_config_lckgrp = NULL;
static lck_mtx_t       kpc_config_lock;
/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t        kpc_pm_has_custom_config;
static uint64_t         kpc_pm_pmc_mask;
void kpc_common_init(void);
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}
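/*
 * Called out of the machine layer when a CPU is registered: allocates and
 * zeroes the per-CPU kpc buffers hung off cpu_data. Returns TRUE on success,
 * FALSE if any allocation failed.
 */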
boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);
	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * It does not fit the per-CPU kpc buffers well, since:
	 *	1. Buffers don't need to be this large.
	 *	2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs are asked to call out into kpc when they are registered, so we
	 * allocate the memory here.
	 */
	if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;

	memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

	/* success */
	return TRUE;
error:
	kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
	kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
	kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
	kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);

	return FALSE;
}
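/*
 * Track which task has forced ownership of all counters (see
 * kpc_force_all_ctrs below); the flag lives in the task's t_chud bits.
 */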
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}
static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* notify the power manager */
	if (kpc_pm_handler)
		kpc_pm_handler( new_state ? FALSE : TRUE );

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, val);

	/* update the internal state */
	force_all_ctrs = val;

	return 0;
}
int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}
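/*
 * A registered PM handler means kpc is sharing the configurable counters
 * with the power manager, i.e. there are multiple clients.
 */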
boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}
boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}
boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count())
		return kpc_controls_fixed_counters();

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
		return FALSE;

	return TRUE;
}
uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed())
		cur_state |= KPC_CLASS_FIXED_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_POWER_MASK;

	return cur_state;
}
/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped registers are supported.
	 */
	if (all_cpus)
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	else
		return kpc_get_curcpu_counters(classes, curcpu, buf);
}
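/*
 * Shadow counters are the per-CPU counter values last saved by the machine
 * layer (e.g. when a PMI is taken), as opposed to live hardware reads.
 */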
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled = FALSE;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}
uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_config_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
		count += kpc_rawpmu_config_count();

	return count;
}
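/*
 * Config words are packed into current_config in class order: fixed,
 * configurable, power, then raw PMU. Callers should size the buffer with
 * kpc_get_config_count() for the same class set.
 */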
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}
/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf = NULL;

	buf = kalloc(COUNTERBUF_SIZE);
	if (buf) {
		bzero(buf, COUNTERBUF_SIZE);
	}

	return buf;
}
void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kfree(buf, COUNTERBUF_SIZE);
	}
}
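/*
 * PMI path into kperf: build a sampling context for the current thread and
 * hand it to kperf_sample() with the action id programmed for the counter.
 */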
void
kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;
	struct kperf_context ctx;
	task_t task = NULL;
	int r;

	BUF_DATA1(PERF_KPC_HNDLR | DBG_FUNC_START, 0);

	ctx.cur_pid = 0;
	ctx.cur_thread = current_thread();

	task = chudxnu_task_for_thread(ctx.cur_thread);
	if (task)
		ctx.cur_pid = chudxnu_pid_for_task(task);

	ctx.trigger_type = TRIGGER_TYPE_PMI;
	ctx.trigger_id = 0;

	r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO1(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i)
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}
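/*
 * Reserve a fixed set of configurable PMCs (mask 0x38, i.e. counters 3-5)
 * for the power manager, with custom_config=TRUE.
 */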
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-conditions */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	       req_mask, custom_config);

	/* post-conditions */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}
void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}
uint8_t
kpc_popcount(uint64_t value)
{
	return __builtin_popcountll(value);
}
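/*
 * Compute which configurable PMCs back the requested classes: the
 * CONFIGURABLE class gets everything not reserved by the PM (or everything
 * when a task forced all counters), and the POWER class gets the PM's
 * reserved PMCs while the PM is registered without a custom config.
 */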
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* not configurable classes or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE)
			cfg_mask |= all_cfg_pmcs_mask;
		else
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
	}

	/*
	 * The power class exists iff:
	 *	- No tasks acquired all PMCs
	 *	- PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert( ((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count() );
	assert( (cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}