/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>

#include <kperf/buffer.h>
#include <kern/thread.h>

#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif /* defined(__arm64__) || defined(__arm__) */

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>
uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
	                         COUNTERBUF_SIZE_PER_CPU)
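/*
 * Illustrative sizing only (values assumed, not taken from this file): with
 * KPC_MAX_COUNTERS == 32 and machine_info.logical_cpu_max == 8, each per-CPU
 * buffer is 32 * sizeof(uint64_t) = 256 bytes, and a full counter buffer is
 * 8 * 256 = 2048 bytes.
 */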
/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;

#if MACH_ASSERT
static bool kpc_calling_pm = false;
#endif /* MACH_ASSERT */

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;
static uint64_t *
kpc_percpu_alloc(void)
{
	return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE_PER_CPU,
	    Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
}

static void
kpc_percpu_free(uint64_t *buf)
{
	if (buf) {
		kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE_PER_CPU);
	}
}
boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * This does not suit the per-CPU kpc buffers well, since:
	 *  1. Buffers don't need to be this large.
	 *  2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs are asked to call out into kpc when being registered, so we
	 * allocate the memory here.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
		goto error;
	}

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}
void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);

	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}
	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}
	if (cpu_data->cpu_kpc_shadow != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_shadow);
		cpu_data->cpu_kpc_shadow = NULL;
	}
	if (cpu_data->cpu_kpc_reload != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_reload);
		cpu_data->cpu_kpc_reload = NULL;
	}
}
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state) {
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	} else {
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	}
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
		return EACCES;
	}

	/* nothing to do if the state is not changing */
	if (old_state == new_state) {
		return 0;
	}

	/* notify the power manager */
	if (kpc_pm_handler) {
#if MACH_ASSERT
		kpc_calling_pm = true;
#endif /* MACH_ASSERT */
		kpc_pm_handler(new_state ? FALSE : TRUE);
#if MACH_ASSERT
		kpc_calling_pm = false;
#endif /* MACH_ASSERT */
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}
void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken back.
	 */
	assert(force_all_ctrs == available_to_pm);

	/*
	 * Make sure power management isn't playing games with us.
	 */
	assert(kpc_calling_pm == true);

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}
boolean_t
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}
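/*
 * Worked example for kpc_controls_counter() (counter counts assumed for
 * illustration): with kpc_fixed_count() == 2, ctr == 5 maps to configurable
 * PMC 3, so pmc_mask == 1ULL << 3 == 0x8. If the power manager reserved that
 * PMC with custom_config=TRUE and no task has forced all counters, kpc does
 * not control it and this returns FALSE; otherwise kpc keeps ownership.
 */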
boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count()) {
		return kpc_controls_fixed_counters();
	}

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
		return FALSE;
	}

	return TRUE;
}
uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed()) {
		cur_state |= KPC_CLASS_FIXED_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_POWER_MASK;
	}

	return cur_state;
}
/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped registers are supported.
	 */
	if (all_cpus) {
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	} else {
		return kpc_get_curcpu_counters(classes, curcpu, buf);
	}
}
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	int curcpu_id = cpu_number();
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled = FALSE;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = cpu_number();
	if (curcpu) {
		*curcpu = curcpu_id;
	}

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id)) {
			continue;
		}

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}
uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_config_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients()) {
		count += kpc_rawpmu_config_count();
	}

	return count;
}
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	ret = kpc_set_config_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}
uint32_t
kpc_get_counterbuf_size(void)
{
	return COUNTERBUF_SIZE;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE,
	    Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE);
	}
}
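/*
 * kpc_sample_kperf() below packs a descriptor word for its start tracepoint:
 * the PMC configuration occupies the low bits, the counter index is shifted
 * to bit 32, and the kperf_kpc_flags_t value to bit 48. As a hypothetical
 * example, counter 2 overflowing with flags 0x1 and config 0x10f yields
 * 0x10f | (2ULL << 32) | (1ULL << 48). The sample is requested with
 * SAMPLE_FLAG_PEND_USER, so the user-space portion of the sample is deferred
 * rather than taken in the PMI handler.
 */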
void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
	struct kperf_sample sbuf;

	uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i) {
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}
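/*
 * kpc_register_pm_handler() below reserves the hard-coded mask 0x38
 * (0b111000), i.e. configurable PMCs 3, 4, and 5, on behalf of the power
 * manager, with custom_config=TRUE so the power manager programs those PMCs
 * itself rather than going through kpc.
 */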
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}
void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}
uint8_t
kpc_popcount(uint64_t value)
{
	return (uint8_t)__builtin_popcountll(value);
}
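/*
 * Worked example for kpc_get_configurable_pmc_mask() (values assumed for
 * illustration): with 6 configurable PMCs, all_cfg_pmcs_mask == 0x3f. If the
 * power manager reserved kpc_pm_pmc_mask == 0x38 without a custom config and
 * no task has forced all counters, KPC_CLASS_CONFIGURABLE_MASK yields 0x07
 * and KPC_CLASS_POWER_MASK yields 0x38. Once a task forces all counters, the
 * configurable class covers the full 0x3f and the power class becomes empty.
 */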
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable classes requested or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE) {
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *  - No tasks acquired all PMCs
	 *  - PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0);
	assert(kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL);

	return cfg_mask | pwr_mask;
}