/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#if defined(ARMA7)
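/*
 * Quick reference for the CP15 c9 PMU registers accessed below (ARMv7
 * PMUv2 encodings; see the ARM Architecture Reference Manual):
 *   c9, c12, 0  PMCR        control register (E = master enable, N = #counters)
 *   c9, c12, 1  PMCNTENSET  counter enable set
 *   c9, c12, 2  PMCNTENCLR  counter enable clear
 *   c9, c12, 3  PMOVSR      overflow flag status
 *   c9, c12, 4  PMSWINC     software increment
 *   c9, c12, 5  PMSELR      event counter selection
 *   c9, c13, 0  PMCCNTR     cycle counter
 *   c9, c13, 1  PMXEVTYPER  selected counter's event type
 *   c9, c13, 2  PMXEVCNTR   selected counter's count value
 *   c9, c14, 1  PMINTENSET  overflow interrupt enable set
 *   c9, c14, 2  PMINTENCLR  overflow interrupt enable clear
 */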
/* PMU v2 based implementation for A7 */
static uint32_t saved_PMXEVTYPER[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMCNTENSET[MAX_CPUS];
static uint64_t saved_counter[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMOVSR[MAX_CPUS];
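/*
 * Per-CPU PMU state captured by the save/restore paths below so that counter
 * configuration, overflow flags and counts can be preserved and reinstated
 * around idle. Indexed by cpu_number(); slot 0 of the per-counter arrays is
 * the fixed cycle counter, slots 1..n are the configurable event counters.
 */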
static uint32_t kpc_configured = 0;
static uint32_t kpc_xcall_sync;
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_reload_sync;
static uint32_t kpc_enabled_counters = 0;

static int first_time = 1;

static uint64_t get_counter_config(uint32_t counter);
static boolean_t
enable_counter(uint32_t counter)
{
    boolean_t enabled;
    uint32_t PMCNTENSET;
    /* Cycle counter is MSB; configurable counters reside in LSBs */
    uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

    /* Enabled? */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));

    enabled = (PMCNTENSET & mask);

    /* Counter interrupt enable (PMINTENSET) */
    __asm__ volatile ("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));

    /* Individual counter enable set (PMCNTENSET) */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));

    kpc_enabled_counters++;

    /* 1st enabled counter? Set the master enable bit in PMCR */
    if (kpc_enabled_counters == 1) {
        uint32_t PMCR = 1;
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
    }

    return enabled;
}
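/*
 * Note on the enable/overflow bit layout used by enable_counter() and
 * disable_counter(): in PMCNTENSET/PMCNTENCLR/PMOVSR/PMINTENSET/PMINTENCLR,
 * bit 31 (the C bit) corresponds to the cycle counter and bit n corresponds
 * to event counter n. kpc counter 0 is the cycle counter, so kpc counter k
 * (k >= 1) maps to event counter k - 1, hence the (1 << (counter - 1)) mask.
 */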
static boolean_t
disable_counter(uint32_t counter)
{
    boolean_t enabled;
    uint32_t PMCNTENCLR;
    /* Cycle counter is MSB; configurable counters reside in LSBs */
    uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

    /* Enabled? */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));

    enabled = (PMCNTENCLR & mask);

    /* Individual counter enable clear (PMCNTENCLR) */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));

    /* Counter interrupt disable (PMINTENCLR) */
    __asm__ volatile ("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));

    kpc_enabled_counters--;

    /* Last enabled counter? Clear the master enable bit in PMCR */
    if (kpc_enabled_counters == 0) {
        uint32_t PMCR = 0;
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
    }

    return enabled;
}
static uint64_t
read_counter(uint32_t counter)
{
    uint32_t low = 0;

    switch (counter) {
    case 0:
        /* Fixed counter */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Configurable. Set PMSELR... */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
        /* ...then read PMXEVCNTR */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low));
        break;
    default:
        return 0;
    }

    return (uint64_t)low;
}
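/*
 * The event counters are accessed indirectly: PMSELR selects a counter and
 * PMXEVCNTR/PMXEVTYPER then read or write the selected counter's count and
 * event type. All ARMv7 PMU counters are 32 bits wide, so the 64-bit values
 * used by the kpc interface only ever carry 32 significant bits here.
 */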
static void
write_counter(uint32_t counter, uint64_t value)
{
    uint32_t low = value & 0xFFFFFFFF;

    switch (counter) {
    case 0:
        /* Fixed counter */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Configurable. Set PMSELR... */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
        /* ...then write PMXEVCNTR */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low));
        break;
    default:
        break;
    }
}
static uint64_t
kpc_reload_counter(int ctr)
{
    uint64_t old = read_counter(ctr);
    write_counter(ctr, FIXED_RELOAD(ctr));
    return old;
}
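/*
 * Reload values are computed by kpc_set_reload_xcall() below as
 * RELOAD = max - period, where max is the all-ones counter value. A counter
 * written with that value overflows, and raises a PMI, after period + 1
 * further increments; the shadow arithmetic in the PMI handler accounts for
 * this with its (max - RELOAD + 1) term.
 */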
static void
set_running_fixed(boolean_t on)
{
    int i;
    boolean_t enabled;
    int n = KPC_ARM_FIXED_COUNT;

    enabled = ml_set_interrupts_enabled(FALSE);

    for (i = 0; i < n; i++) {
        if (on) {
            enable_counter(i);
        } else {
            disable_counter(i);
        }
    }

    ml_set_interrupts_enabled(enabled);
}
static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    boolean_t enabled;

    enabled = ml_set_interrupts_enabled(FALSE);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if (((1ULL << i) & target_mask) == 0) {
            continue;
        }
        assert(kpc_controls_counter(offset + i));

        if ((1ULL << i) & state_mask) {
            enable_counter(offset + i);
        } else {
            disable_counter(offset + i);
        }
    }

    ml_set_interrupts_enabled(enabled);
}
static uintptr_t
get_interrupted_pc(bool *kernel_out)
{
    struct arm_saved_state *state = getCpuDatap()->cpu_int_state;
    if (!state) {
        return 0;
    }

    bool kernel = !PSR_IS_USER(get_saved_state_cpsr(state));
    *kernel_out = kernel;
    uintptr_t pc = get_saved_state_pc(state);
    if (kernel) {
        pc = VM_KERNEL_UNSLIDE(pc);
    }

    return pc;
}
void kpc_pmi_handler(cpu_id_t source);
void
kpc_pmi_handler(cpu_id_t source)
{
    uint64_t extra;
    int ctr;
    boolean_t enabled;

    enabled = ml_set_interrupts_enabled(FALSE);

    /* The pmi must be delivered to the CPU that generated it */
    if (source != getCpuDatap()->interrupt_nub) {
        panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
    }

    for (ctr = 0;
         ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
         ctr++) {
        uint32_t PMOVSR;
        uint32_t mask;

        /* check the counter for overflow */
        if (ctr == 0) {
            mask = 1 << 31;
        } else {
            mask = 1 << (ctr - 1);
        }

        /* read PMOVSR */
        __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

        if (PMOVSR & mask) {
            extra = kpc_reload_counter(ctr);

            FIXED_SHADOW(ctr)
                += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;

            if (FIXED_ACTIONID(ctr)) {
                bool kernel = false;
                uintptr_t pc = get_interrupted_pc(&kernel);
                kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, get_counter_config(ctr),
                    FIXED_SHADOW(ctr), pc, kernel ? KPC_KERNEL_PC : 0);
            }

            /* clear PMOVSR bit */
            __asm__ volatile ("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
        }
    }

    ml_set_interrupts_enabled(enabled);
}
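/*
 * On overflow, kpc_reload_counter() returns whatever the counter had already
 * accumulated past the wrap point ("extra"); the shadow value is advanced by
 * one full period (plus the wrap) plus that remainder, so no events are lost
 * between the overflow and the reload. The PMOVSR flag is cleared by writing
 * a 1 to it; otherwise the interrupt would remain asserted.
 */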
static void
kpc_set_running_xcall( void *vstate )
{
    struct kpc_running_remote *mp_config = (struct kpc_running_remote *) vstate;
    assert(mp_config);

    if (kpc_controls_fixed_counters()) {
        set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
    }

    set_running_configurable(mp_config->cfg_target_mask,
        mp_config->cfg_state_mask);

    if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
        thread_wakeup((event_t) &kpc_xcall_sync);
    }
}
static uint64_t
get_counter_config(uint32_t counter)
{
    uint32_t config = 0;

    switch (counter) {
    case 0:
        /* Fixed counter accessed via top bit... */
        counter = 31;
        /* Write PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
        /* Read PMXEVTYPER */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Configurable counters are offset by the fixed counter */
        counter -= 1;
        /* Write PMSELR.SEL to select the configurable counter */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
        /* Read PMXEVTYPER to get the config */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
        break;
    default:
        break;
    }

    return config;
}
static void
set_counter_config(uint32_t counter, uint64_t config)
{
    switch (counter) {
    case 0:
        /* Write PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
        /* Write PMXEVTYPER */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Write PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
        /* Write PMXEVTYPER */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
        break;
    default:
        break;
    }
}
void
kpc_arch_init(void)
{
    uint32_t PMCR;
    uint32_t event_counters;

    /* read PMCR and extract the number of event counters (PMCR.N) */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR));
    event_counters = (PMCR >> 11) & 0x1F;

    assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT);
}
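/*
 * PMCR[15:11] (the N field) reports how many event counters the PMU
 * implements; the Cortex-A7 provides four, in addition to the dedicated
 * cycle counter, which is why the save/restore loops below iterate over
 * four event selections and five counter values.
 */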
uint32_t
kpc_get_classes(void)
{
    return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

uint32_t
kpc_fixed_count(void)
{
    return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
    return KPC_ARM_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
    return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
    assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
    return kpc_popcount(pmc_mask);
}
int
kpc_get_fixed_config(kpc_config_t *configv)
{
    configv[0] = get_counter_config(0);
    return 0;
}

uint64_t
kpc_fixed_max(void)
{
    return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
    return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    uint64_t ctr = 0ULL;

    assert(counterv);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        uint32_t PMOVSR;
        uint32_t mask;

        if (((1ULL << i) & pmc_mask) == 0) {
            continue;
        }
        ctr = read_counter(i + offset);

        /* check the counter for overflow */
        mask = 1 << i;

        /* read PMOVSR */
        __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

        if (PMOVSR & mask) {
            ctr = CONFIGURABLE_SHADOW(i) +
                (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
                ctr;
        } else {
            ctr = CONFIGURABLE_SHADOW(i) +
                (ctr - CONFIGURABLE_RELOAD(i));
        }

        *counterv++ = ctr;
    }

    return 0;
}
int
kpc_get_fixed_counters(uint64_t *counterv)
{
    uint32_t PMOVSR;
    uint64_t ctr = 0ULL;

    assert(counterv);

    /* check the counter for overflow */

    /* read PMOVSR */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

    ctr = read_counter(0);

    if (PMOVSR & (1 << 31)) {
        ctr = FIXED_SHADOW(0) +
            (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) +
            ctr;
    } else {
        ctr = FIXED_SHADOW(0) +
            (ctr - FIXED_RELOAD(0));
    }

    counterv[0] = ctr;

    return 0;
}
boolean_t
kpc_is_running_fixed(void)
{
    return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
    assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
    return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
        ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}
int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
    unsigned int cpu;

    assert(mp_config);

    if (first_time) {
        kprintf( "kpc: setting PMI handler\n" );
        PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler);
        for (cpu = 0; cpu < real_ncpus; cpu++) {
            PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id,
                TRUE);
        }
        first_time = 0;
    }

    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall,
        mp_config);

    kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
    kpc_running_classes = mp_config->classes;
    kpc_configured = 1;

    return 0;
}
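/*
 * cpu_broadcast_xcall() runs kpc_set_running_xcall() on every CPU and blocks
 * the caller until each one has finished: every handler decrements
 * kpc_xcall_sync and the last one to reach zero issues the thread_wakeup().
 * The same pattern is used below with kpc_reload_sync, kpc_config_sync and
 * kpc_xread_sync.
 */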
static void
save_regs(void)
{
    int i;
    int cpuid = cpu_number();
    uint32_t PMCR = 0;

    __asm__ volatile ("dmb ish");

    /* Clear master enable */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));

    /* Save individual enable state */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));

    /* Save PMOVSR */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));

    /* Select fixed counter with PMSELR.SEL */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
    /* Read PMXEVTYPER */
    __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));

    /* Save configurable event selections */
    for (i = 0; i < 4; i++) {
        /* Select counter with PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
        /* Read PMXEVTYPER */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
    }

    /* Finally, save count for each counter */
    for (i = 0; i < 5; i++) {
        saved_counter[cpuid][i] = read_counter(i);
    }
}
static void
restore_regs(void)
{
    int i;
    int cpuid = cpu_number();
    uint64_t extra;
    uint32_t PMCR = 1;

    /* Restore counter values */
    for (i = 0; i < 5; i++) {
        /* did we overflow? if so handle it now since we won't get a pmi */
        uint32_t mask;

        /* check the counter for overflow */
        if (i == 0) {
            mask = 1 << 31;
        } else {
            mask = 1 << (i - 1);
        }

        if (saved_PMOVSR[cpuid] & mask) {
            extra = kpc_reload_counter(i);

            /*
             * CONFIGURABLE_* directly follows FIXED, so we can simply
             * increment the index here. Although it's ugly.
             */
            FIXED_SHADOW(i)
                += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra;

            if (FIXED_ACTIONID(i)) {
                bool kernel = false;
                uintptr_t pc = get_interrupted_pc(&kernel);
                kpc_sample_kperf(FIXED_ACTIONID(i), i, get_counter_config(i),
                    FIXED_SHADOW(i), pc, kernel ? KPC_KERNEL_PC : 0);
            }
        }

        write_counter(i, saved_counter[cpuid][i]);
    }

    /* Restore configuration - first, the fixed... */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
    /* Write PMXEVTYPER */
    __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0]));

    /* ...then the configurable */
    for (i = 0; i < 4; i++) {
        /* Select counter with PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
        /* Write PMXEVTYPER */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1]));
    }

    /* Restore enable state */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid]));

    /* Counter master re-enable */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}
static void
kpc_set_reload_xcall(void *vmp_config)
{
    struct kpc_config_remote *mp_config = vmp_config;
    uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
    uint64_t *new_period = NULL, max = kpc_configurable_max();
    boolean_t enabled;

    assert(mp_config);
    assert(mp_config->configv);
    classes = mp_config->classes;
    new_period = mp_config->configv;

    enabled = ml_set_interrupts_enabled(FALSE);

    if ((classes & KPC_CLASS_FIXED_MASK) && kpc_controls_fixed_counters()) {
        /* update shadow counters */
        kpc_get_fixed_counters(&FIXED_SHADOW(0));

        /* set the new period */
        count = kpc_fixed_count();
        for (uint32_t i = 0; i < count; ++i) {
            if (*new_period == 0) {
                *new_period = kpc_fixed_max();
            }
            FIXED_RELOAD(i) = max - *new_period;
            /* reload the counter if possible */
            kpc_reload_counter(i);
            /* next period value */
            new_period++;
        }
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        /*
         * Update _all_ shadow counters, this cannot be done for only
         * selected PMCs. Otherwise, we would corrupt the configurable
         * shadow buffer since the PMCs are muxed according to the pmc
         * mask.
         */
        uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
        kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

        /* set the new period */
        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            /* ignore the counter */
            if (((1ULL << i) & mp_config->pmc_mask) == 0) {
                continue;
            }
            if (*new_period == 0) {
                *new_period = kpc_configurable_max();
            }
            CONFIGURABLE_RELOAD(i) = max - *new_period;
            /* reload the counter */
            kpc_reload_counter(offset + i);
            /* next period value */
            new_period++;
        }
    }

    ml_set_interrupts_enabled(enabled);

    if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
        thread_wakeup((event_t) &kpc_reload_sync);
    }
}
int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

    kpc_configured = 1;

    return 0;
}
int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

    assert(configv);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if ((1ULL << i) & pmc_mask) {
            *configv++ = get_counter_config(i + offset);
        }
    }

    return 0;
}
static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    boolean_t enabled;

    assert(configv);

    enabled = ml_set_interrupts_enabled(FALSE);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if (((1ULL << i) & pmc_mask) == 0) {
            continue;
        }
        assert(kpc_controls_counter(i + offset));

        set_counter_config(i + offset, *configv++);
    }

    ml_set_interrupts_enabled(enabled);

    return 0;
}
static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
    struct kpc_config_remote *mp_config = vmp_config;
    kpc_config_t *new_config = NULL;
    uint32_t classes = 0;

    assert(mp_config);
    assert(mp_config->configv);
    classes = mp_config->classes;
    new_config = mp_config->configv;

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        kpc_set_configurable_config(new_config, mp_config->pmc_mask);
        new_config += kpc_popcount(mp_config->pmc_mask);
    }

    if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
        thread_wakeup((event_t) &kpc_config_sync);
    }
}
int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

    kpc_configured = 1;

    return 0;
}
void
kpc_idle(void)
{
    if (kpc_configured) {
        save_regs();
    }
}

void
kpc_idle_exit(void)
{
    if (kpc_configured) {
        restore_regs();
    }
}
static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
    struct kpc_get_counters_remote *handler = args;
    int offset = 0, r = 0;

    assert(handler);
    assert(handler->buf);

    offset = cpu_number() * handler->buf_stride;
    r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

    /* number of counters added by this CPU, needs to be atomic */
    os_atomic_add(&(handler->nb_counters), r, relaxed);

    if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
        thread_wakeup((event_t) &kpc_xread_sync);
    }
}
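/*
 * Each CPU writes its counters into its own slice of the shared buffer,
 * offset by cpu_number() * buf_stride, so no locking is needed for the
 * counter data itself; only the running total nb_counters is updated
 * atomically.
 */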
int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
    int enabled = 0;
    struct kpc_get_counters_remote hdl = {
        .classes = classes, .nb_counters = 0,
        .buf_stride = kpc_get_counter_count(classes),
        .buf = buf
    };

    assert(buf);

    enabled = ml_set_interrupts_enabled(FALSE);

    if (curcpu) {
        *curcpu = cpu_number();
    }
    cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);

    ml_set_interrupts_enabled(enabled);

    return hdl.nb_counters;
}
int
kpc_get_pmu_version(void)
{
    return KPC_PMU_ARM_V2;
}
int
kpc_set_sw_inc( uint32_t mask )
{
    /* Only works with the configurable counters set to count the increment event (0x0) */

    /* Write to PMSWINC */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));

    return 0;
}
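/*
 * Writing bit i of PMSWINC increments event counter i, but only if that
 * counter's event type has been set to 0x00 (SW_INCR). For example, after
 * programming configurable counter 0 with the SW_INCR event via
 * set_counter_config(), kpc_set_sw_inc(1 << 0) would bump it by one.
 */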
#else /* !ARMA7 */

uint32_t
kpc_get_classes(void)
{
    return 0;
}

uint32_t
kpc_fixed_count(void)
{
    return 0;
}

uint32_t
kpc_configurable_count(void)
{
    return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
    return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
    return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
    return 0;
}

uint64_t
kpc_configurable_max(void)
{
    return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
    return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
    return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
    return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
    return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
    return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
    return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
    return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
    return ENOTSUP;
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
#pragma unused(classes)
#pragma unused(curcpu)
#pragma unused(buf)
    return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
    return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
    return KPC_PMU_ERROR;
}

#endif /* !ARMA7 */
/*
 * RAWPMU isn't implemented for any of the 32-bit ARMs.
 */

uint32_t
kpc_rawpmu_config_count(void)
{
    return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
    return 0;
}