/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#ifdef ARMA7

/* PMU v2 based implementation for A7 */
static uint32_t saved_PMXEVTYPER[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMCNTENSET[MAX_CPUS];
static uint64_t saved_counter[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMOVSR[MAX_CPUS];

static uint32_t kpc_configured = 0;
static uint32_t kpc_xcall_sync;
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_reload_sync;
static uint32_t kpc_enabled_counters = 0;

static int first_time = 1;
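
/*
 * Counter indexing convention used throughout this file: index 0 is the
 * fixed cycle counter (PMCCNTR, enable bit 31), and indices 1 through
 * KPC_ARM_CONFIGURABLE_COUNT select the configurable event counters
 * (enable bits 0..N-1). The saved_* arrays above hold per-CPU PMU state
 * captured by the save/restore routines further down.
 */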
static boolean_t
enable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENSET;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
	uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

	/* Already enabled? */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));

	enabled = (PMCNTENSET & mask);
	if (!enabled) {
		/* Counter interrupt enable (PMINTENSET) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));

		/* Individual counter enable set (PMCNTENSET) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));

		kpc_enabled_counters++;

		/* 1st enabled counter? Set the master enable bit in PMCR */
		if (kpc_enabled_counters == 1) {
			uint32_t PMCR = 1;
			__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}
static boolean_t
disable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENCLR;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
	uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

	/* Currently enabled? */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));

	enabled = (PMCNTENCLR & mask);
	if (enabled) {
		/* Individual counter enable clear (PMCNTENCLR) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));

		/* Counter interrupt disable (PMINTENCLR) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));

		kpc_enabled_counters--;

		/* Last enabled counter? Clear the master enable bit in PMCR */
		if (kpc_enabled_counters == 0) {
			uint32_t PMCR = 0;
			__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}
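
/*
 * The per-event registers (PMXEVTYPER, PMXEVCNTR) are banked: software
 * first writes the counter index to PMSELR.SEL and then accesses the
 * selected counter through the PMXEV* registers. The read/write helpers
 * below follow that two-step pattern; the counters themselves are 32 bits
 * wide, so only the low word of the 64-bit kpc value is used.
 */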
static uint64_t
read_counter(uint32_t counter)
{
	uint32_t low = 0;

	switch (counter) {
	case 0:
		/* Fixed counter (cycle counter) */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* Configurable. Set PMSELR... */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* ...then read PMXEVCNTR */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low));
		break;
	default:
		break;
	}

	return (uint64_t)low;
}
static void
write_counter(uint32_t counter, uint64_t value)
{
	uint32_t low = value & 0xFFFFFFFF;

	switch (counter) {
	case 0:
		/* Fixed counter (cycle counter) */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* Configurable. Set PMSELR... */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* ...then write PMXEVCNTR */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low));
		break;
	default:
		break;
	}
}
static uint64_t
kpc_reload_counter(int ctr)
{
	uint64_t old = read_counter(ctr);
	write_counter(ctr, FIXED_RELOAD(ctr));
	return old;
}
static void
set_running_fixed(boolean_t on)
{
	int i;
	boolean_t enabled;
	int n = KPC_ARM_FIXED_COUNT;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (i = 0; i < n; i++) {
		if (on) {
			enable_counter(i);
		} else {
			disable_counter(i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}
static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}
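
/*
 * Overflow handling: each counter counts up from its reload value
 * (max - period) and raises a PMI when it wraps. On overflow the shadow
 * value is advanced by the full period (max - reload + 1) plus whatever
 * the counter accumulated after the reload, and kperf is sampled if an
 * action is attached to the counter.
 */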
void kpc_pmi_handler(cpu_id_t source);
void
kpc_pmi_handler(cpu_id_t source)
{
	uint64_t extra;
	int ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* The pmi must be delivered to the CPU that generated it */
	if (source != getCpuDatap()->interrupt_nub) {
		panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
	}

	for (ctr = 0;
	    ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
	    ctr++) {
		uint32_t PMOVSR;
		uint32_t mask;

		/* check the counter for overflow */
		if (ctr == 0) {
			mask = 1 << 31;
		} else {
			mask = 1 << (ctr - 1);
		}

		/* read PMOVSR */
		__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			extra = kpc_reload_counter(ctr);

			FIXED_SHADOW(ctr)
			        += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;

			if (FIXED_ACTIONID(ctr)) {
				kpc_sample_kperf(FIXED_ACTIONID(ctr));
			}

			/* clear PMOVSR bit */
			__asm__ volatile ("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
		}
	}

	ml_set_interrupts_enabled(enabled);
}
static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote *) vstate;
	assert(mp_config);

	if (kpc_controls_fixed_counters()) {
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
	}

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}
static uint64_t
get_counter_config(uint32_t counter)
{
	uint32_t config = 0;

	switch (counter) {
	case 0:
		/* Fixed counter accessed via top bit... */
		counter = 31;
		/* Write PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
		/* Read PMXEVTYPER */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* Configurable counters are indexed from zero in hardware */
		counter -= 1;
		/* Write PMSELR.SEL to select the configurable counter */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
		/* Read PMXEVTYPER to get the config */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
		break;
	default:
		break;
	}

	return config;
}
static void
set_counter_config(uint32_t counter, uint64_t config)
{
	switch (counter) {
	case 0:
		/* Write PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
		/* Write PMXEVTYPER */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		/* Write PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* Write PMXEVTYPER */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
		break;
	default:
		break;
	}
}
void
kpc_arch_init(void)
{
	uint32_t PMCR;
	uint32_t event_counters;

	/* read PMCR and determine the number of event counters */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR));
	event_counters = (PMCR >> 11) & 0x1F;

	assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT);
}
uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}
uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM_FIXED_COUNT;
}
uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM_CONFIGURABLE_COUNT;
}
uint32_t
kpc_fixed_config_count(void)
{
	return KPC_ARM_FIXED_COUNT;
}
uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}
int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = get_counter_config(0);
	return 0;
}
uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}
uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		uint64_t ctr;
		uint32_t PMOVSR;
		uint32_t mask;

		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		/* check the counter for overflow */
		mask = 1 << i;

		/* read PMOVSR */
		__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    ctr;
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}
int
kpc_get_fixed_counters(uint64_t *counterv)
{
	uint64_t ctr;
	uint32_t PMOVSR;
	uint32_t mask;

	assert(counterv);

	/* check the counter for overflow */
	mask = 1 << 31;

	/* read PMOVSR */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

	ctr = read_counter(0);

	if (PMOVSR & mask) {
		ctr = FIXED_SHADOW(0) +
		    (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) +
		    ctr;
	} else {
		ctr = FIXED_SHADOW(0) +
		    (ctr - FIXED_RELOAD(0));
	}

	counterv[0] = ctr;

	return 0;
}
boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}
boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}
int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	unsigned int cpu;

	assert(mp_config);

	if (first_time) {
		kprintf( "kpc: setting PMI handler\n" );
		PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler);
		for (cpu = 0; cpu < real_ncpus; cpu++) {
			PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id,
			    TRUE);
		}
		first_time = 0;
	}

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall,
	    mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}
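
/*
 * The next two routines save and restore PMU state around idle (see
 * kpc_idle/kpc_idle_exit below): counter enables, overflow flags, event
 * selections and counts are captured per CPU so they are not lost while
 * the core is down. Overflows noticed on restore are accounted for here,
 * since the corresponding PMI will not be delivered.
 */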
static void
save_regs(void)
{
	int i;
	int cpuid = current_processor()->cpu_id;
	uint32_t PMCR = 0;

	__asm__ volatile ("dmb ish");

	/* Clear master enable */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));

	/* Save individual enable state */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));

	/* Save PMOVSR */
	__asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));

	/* Select fixed counter with PMSELR.SEL */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
	/* Read PMXEVTYPER */
	__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));

	/* Save configurable event selections */
	for (i = 0; i < 4; i++) {
		/* Select counter with PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
		/* Read PMXEVTYPER */
		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
	}

	/* Finally, save count for each counter */
	for (i = 0; i < 5; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}
static void
restore_regs(void)
{
	int i;
	int cpuid = current_processor()->cpu_id;
	uint64_t extra;
	uint32_t PMCR = 1;

	/* Restore counter values */
	for (i = 0; i < 5; i++) {
		/* did we overflow? if so handle it now since we won't get a pmi */
		uint32_t mask;

		/* check the counter for overflow */
		if (i == 0) {
			mask = 1 << 31;
		} else {
			mask = 1 << (i - 1);
		}

		if (saved_PMOVSR[cpuid] & mask) {
			extra = kpc_reload_counter(i);

			/*
			 * CONFIGURABLE_* directly follows FIXED, so we can simply
			 * increment the index here. Although it's ugly.
			 */
			FIXED_SHADOW(i)
			        += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra;

			if (FIXED_ACTIONID(i)) {
				kpc_sample_kperf(FIXED_ACTIONID(i));
			}
		} else {
			write_counter(i, saved_counter[cpuid][i]);
		}
	}

	/* Restore configuration - first, the fixed... */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
	/* Write PMXEVTYPER */
	__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0]));

	/* ...then the configurable */
	for (i = 0; i < 4; i++) {
		/* Select counter with PMSELR.SEL */
		__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
		/* Write PMXEVTYPER */
		__asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1]));
	}

	/* Restore enable state */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid]));

	/* Counter master re-enable */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}
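
/*
 * Period programming: kpc expresses sampling periods as counts, but the
 * hardware counts up and interrupts on overflow, so the reload value
 * written to a counter is (max - period). A period of zero is treated as
 * "count the full range" by substituting the counter maximum.
 */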
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if ((classes & KPC_CLASS_FIXED_MASK) && kpc_controls_fixed_counters()) {
		/* update shadow counters */
		kpc_get_fixed_counters(&FIXED_SHADOW(0));

		/* set the new period */
		count = kpc_fixed_count();
		for (uint32_t i = 0; i < count; ++i) {
			if (*new_period == 0) {
				*new_period = kpc_fixed_max();
			}
			FIXED_RELOAD(i) = max - *new_period;
			/* reload the counter if possible */
			kpc_reload_counter(i);
			/* next period value */
			new_period++;
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters, this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (hw_atomic_sub(&kpc_reload_sync, 1) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
}
int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}
int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}

	return 0;
}
static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	assert(configv);

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);

	return 0;
}
static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (hw_atomic_sub(&kpc_config_sync, 1) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}
}
int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}
void
kpc_idle(void)
{
	if (kpc_configured) {
		save_regs();
	}
}

void
kpc_idle_exit(void)
{
	if (kpc_configured) {
		restore_regs();
	}
}
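
/*
 * Cross-call pattern used for the remote counter reads below (and for the
 * running, period and config updates above): the initiating CPU broadcasts
 * an xcall, each CPU does its share of the work, and the shared sync word
 * is decremented atomically; the last CPU to finish wakes any waiter.
 */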
static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;
	int offset = 0, r = 0;

	assert(handler);
	assert(handler->buf);

	offset = cpu_number() * handler->buf_stride;
	r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	hw_atomic_add(&(handler->nb_counters), r);

	if (hw_atomic_sub(&kpc_xread_sync, 1) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}
int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes),
		.buf = buf
	};

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu) {
		*curcpu = current_processor()->cpu_id;
	}
	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}
int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_V2;
}
int
kpc_set_sw_inc( uint32_t mask )
{
	/* Only works with the configurable counters set to count the increment event (0x0) */

	/* Write to PMSWINC */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));

	return 0;
}
#else /* !ARMA7 */

/* no kpc */

uint32_t
kpc_get_classes(void)
{
	return 0;
}

uint32_t
kpc_fixed_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_count(void)
{
	return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
	return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return 0;
}

uint64_t
kpc_configurable_max(void)
{
	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
	return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

void
kpc_idle(void)
{
	/* do nothing */
}

void
kpc_idle_exit(void)
{
	/* do nothing */
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
#pragma unused(classes)
#pragma unused(curcpu)
#pragma unused(buf)
	return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

#endif /* ARMA7 */
/*
 * RAWPMU isn't implemented for any of the 32-bit ARMs.
 */

uint32_t
kpc_rawpmu_config_count(void)
{
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}