/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/kpc.h>
#include <sys/errno.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#if defined(ARMA7)

/* PMU v2 based implementation for A7 */
static uint32_t saved_PMXEVTYPER[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMCNTENSET[MAX_CPUS];
static uint64_t saved_counter[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMOVSR[MAX_CPUS];

static uint32_t kpc_configured = 0;
static uint32_t kpc_xcall_sync;
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_reload_sync;
static uint32_t kpc_enabled_counters = 0;

static int first_time = 1;
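
/*
 * CP15 c9 encodings used by the inline assembly below (ARMv7 PMUv2):
 *   c9, c12, 0  PMCR        control (E bit = master enable, N field = counter count)
 *   c9, c12, 1  PMCNTENSET  counter enable set
 *   c9, c12, 2  PMCNTENCLR  counter enable clear
 *   c9, c12, 3  PMOVSR      overflow flag status
 *   c9, c12, 4  PMSWINC     software increment
 *   c9, c12, 5  PMSELR      counter select (SEL = 31 picks the cycle counter)
 *   c9, c13, 0  PMCCNTR     cycle counter
 *   c9, c13, 1  PMXEVTYPER  event type / filter for the selected counter
 *   c9, c13, 2  PMXEVCNTR   event count for the selected counter
 *   c9, c14, 1  PMINTENSET  overflow interrupt enable set
 *   c9, c14, 2  PMINTENCLR  overflow interrupt enable clear
 */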

static boolean_t
enable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENSET;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
	uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

	/* Already enabled? Check PMCNTENSET. */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));

	enabled = (PMCNTENSET & mask);
	if (!enabled) {
		/* Counter interrupt enable (PMINTENSET) */
		__asm__ volatile("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));

		/* Individual counter enable set (PMCNTENSET) */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));

		kpc_enabled_counters++;

		/* 1st enabled counter? Set the master enable bit in PMCR */
		if (kpc_enabled_counters == 1) {
			uint32_t PMCR = 1;
			__asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENCLR;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
	uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

	/* Currently enabled? Check PMCNTENCLR (reads the same enable bits). */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));

	enabled = (PMCNTENCLR & mask);
	if (enabled) {
		/* Individual counter enable clear (PMCNTENCLR) */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));

		/* Counter interrupt disable (PMINTENCLR) */
		__asm__ volatile("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));

		kpc_enabled_counters--;

		/* Last enabled counter? Clear the master enable bit in PMCR */
		if (kpc_enabled_counters == 0) {
			uint32_t PMCR = 0;
			__asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}

static uint64_t
read_counter(uint32_t counter)
{
	uint32_t low = 0;

	if (counter == 0) {
		/* Fixed counter: read PMCCNTR */
		__asm__ volatile("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low));
	} else {
		/* Configurable. Set PMSELR... */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* ...then read PMXEVCNTR */
		__asm__ volatile("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low));
	}

	return (uint64_t)low;
}
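
/*
 * The PMUv2 counters used here (including PMCCNTR) are 32 bits wide, so
 * read_counter() only ever produces a low word; the 64-bit values that kpc
 * reports are reconstructed from the shadow/reload bookkeeping in the
 * readers and in the PMI handler below.
 */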

static void
write_counter(uint32_t counter, uint64_t value)
{
	uint32_t low = value & 0xFFFFFFFF;

	if (counter == 0) {
		/* Fixed counter: write PMCCNTR */
		__asm__ volatile("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low));
	} else {
		/* Configurable. Set PMSELR... */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* ...then write PMXEVCNTR */
		__asm__ volatile("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low));
	}
}

static uint64_t
kpc_reload_counter(int ctr)
{
	uint64_t old = read_counter(ctr);
	write_counter(ctr, FIXED_RELOAD(ctr));
	return old;
}
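
/*
 * Reload semantics: kpc_set_reload_xcall() stores max - period into
 * FIXED_RELOAD()/CONFIGURABLE_RELOAD(), so a counter rewritten with its
 * reload value overflows (and raises a PMI) after roughly `period` more
 * events.  The value read back here before the rewrite is handed to the
 * overflow paths as `extra`, the events that accumulated since the wrap.
 */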

static void
set_running_fixed(boolean_t on)
{
	int i;
	boolean_t enabled;
	int n = KPC_ARM_FIXED_COUNT;

	enabled = ml_set_interrupts_enabled(FALSE);

	for( i = 0; i < n; i++ ) {
		if (on) {
			enable_counter(i);
		} else {
			disable_counter(i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0)
			continue;
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

void kpc_pmi_handler(cpu_id_t source);

void
kpc_pmi_handler(cpu_id_t source)
{
	uint64_t extra;
	int ctr;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* The pmi must be delivered to the CPU that generated it */
	if (source != getCpuDatap()->interrupt_nub) {
		panic("pmi from IOCPU %p delivered to IOCPU %p", source,
		      getCpuDatap()->interrupt_nub);
	}

	for (ctr = 0;
	     ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
	     ctr++)
	{
		uint32_t PMOVSR;
		/* check the counter for overflow */
		uint32_t mask = (ctr == 0) ? (1 << 31) : (1 << (ctr - 1));

		__asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			extra = kpc_reload_counter(ctr);

			FIXED_SHADOW(ctr)
				+= (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;

			if (FIXED_ACTIONID(ctr))
				kpc_sample_kperf(FIXED_ACTIONID(ctr));

			/* clear PMOVSR bit */
			__asm__ volatile("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
		}
	}

	ml_set_interrupts_enabled(enabled);
}
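
/*
 * Overflow accounting: once a counter wraps, the events it saw since the
 * last reload amount to (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1), plus
 * whatever it counted again before kpc_reload_counter() rewrote it (the
 * `extra` term); that sum is folded into the 64-bit shadow value.
 */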

static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;

	if (kpc_controls_fixed_counters())
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);

	set_running_configurable(mp_config->cfg_target_mask,
				 mp_config->cfg_state_mask);

	if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}
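
/*
 * Cross-call pattern used throughout this file: cpu_broadcast_xcall()
 * dispatches the handler to every CPU, each handler decrements the sync
 * counter with hw_atomic_sub(), and the CPU that drops it to zero wakes
 * whoever is sleeping on the counter's address.  The same shape is reused
 * for kpc_reload_sync, kpc_config_sync and kpc_xread_sync below.
 */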

static uint64_t
get_counter_config(uint32_t counter)
{
	uint32_t config = 0;

	if (counter == 0) {
		/* Fixed counter accessed via top bit... */
		counter = 31;
		/* Write PMSELR.SEL */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
		/* Read PMXEVTYPER */
		__asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
	} else {
		/* Write PMSELR.SEL to select the configurable counter */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* Read PMXEVTYPER to get the config */
		__asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
	}

	return config;
}

static void
set_counter_config(uint32_t counter, uint64_t config)
{
	if (counter == 0) {
		/* Write PMSELR.SEL */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
		/* Write PMXEVTYPER */
		__asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
	} else {
		/* Write PMSELR.SEL */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
		/* Write PMXEVTYPER */
		__asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
	}
}
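
/*
 * PMSELR.SEL = 31 (0b11111) selects the cycle counter, so the PMXEVTYPER
 * access that follows it targets the cycle counter's filter register;
 * values 0-3 select the four configurable event counters.
 */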

/* internal functions */

void
kpc_arch_init(void)
{
	uint32_t PMCR;
	uint32_t event_counters;

	/* read PMCR and determine the number of event counters */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR));
	event_counters = (PMCR >> 11) & 0x1F;

	assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT);
}
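
/*
 * PMCR[15:11] (the N field) reports how many event counters the PMU
 * implements; the assert above checks that the hardware provides at least
 * the KPC_ARM_CONFIGURABLE_COUNT counters this file expects.
 */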

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
	return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = get_counter_config(0);

	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	uint64_t ctr = 0ULL;

	for (uint32_t i = 0; i < cfg_count; ++i) {
		uint32_t PMOVSR;
		uint32_t mask;

		/* skip counters not selected by the mask */
		if (((1ULL << i) & pmc_mask) == 0)
			continue;

		ctr = read_counter(i + offset);

		/* check the counter for overflow */
		mask = 1 << i;

		__asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			ctr = CONFIGURABLE_SHADOW(i) +
				(kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
				ctr;
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
				(ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
	uint32_t PMOVSR;
	uint64_t ctr = 0ULL;

	/* check the counter for overflow */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

	ctr = read_counter(0);

	if (PMOVSR & (1 << 31)) {
		ctr = FIXED_SHADOW(0) +
			(kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) +
			ctr;
	} else {
		ctr = FIXED_SHADOW(0) +
			(ctr - FIXED_RELOAD(0));
	}

	counterv[0] = ctr;

	return 0;
}
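
/*
 * Both readers above consult PMOVSR so that a pending, not-yet-serviced
 * overflow is folded in immediately; without that check a caller could see
 * a counter value that appears to move backwards across a wrap.
 */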

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	unsigned int cpu;

	if (first_time) {
		kprintf( "kpc: setting PMI handler\n" );
		PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler);
		for (cpu = 0; cpu < real_ncpus; cpu++)
			PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id,
							TRUE);
		first_time = 0;
	}

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall,
			    mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}
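
/*
 * Illustrative only: the common kpc layer drives this hook with a
 * kpc_running_remote describing which classes and configurable PMCs to
 * touch.  A minimal sketch, assuming the three fields used above are all
 * that needs to be filled in:
 *
 *	struct kpc_running_remote cfg = {
 *		.classes         = KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK,
 *		.cfg_target_mask = 0xFULL,	// act on all four configurable PMCs
 *		.cfg_state_mask  = 0xFULL,	// ...and leave them enabled
 *	};
 *	kpc_set_running_arch(&cfg);
 */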

static void
save_regs(void)
{
	int i;
	int cpuid = current_processor()->cpu_id;
	uint32_t PMCR = 0;

	__asm__ volatile("dmb ish");

	/* Clear master enable */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));

	/* Save individual enable state */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));

	/* Save the overflow flags */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));

	/* Select fixed counter with PMSELR.SEL */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
	/* Read PMXEVTYPER */
	__asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));

	/* Save configurable event selections */
	for (i = 0; i < 4; i++) {
		/* Select counter with PMSELR.SEL */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
		/* Read PMXEVTYPER */
		__asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
	}

	/* Finally, save count for each counter */
	for (i = 0; i < 5; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}

static void
restore_regs(void)
{
	int i;
	int cpuid = current_processor()->cpu_id;
	uint64_t extra;
	uint32_t PMCR = 1;

	/* Restore counter values */
	for (i = 0; i < 5; i++) {
		/* did we overflow? if so handle it now since we won't get a pmi */
		/* check the counter for overflow */
		uint32_t mask = (i == 0) ? (1 << 31) : (1 << (i - 1));

		if (saved_PMOVSR[cpuid] & mask) {
			extra = kpc_reload_counter(i);

			/*
			 * CONFIGURABLE_* directly follows FIXED, so we can simply
			 * increment the index here. Although it's ugly.
			 */
			FIXED_SHADOW(i)
				+= (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra;

			if (FIXED_ACTIONID(i))
				kpc_sample_kperf(FIXED_ACTIONID(i));
		} else {
			write_counter(i, saved_counter[cpuid][i]);
		}
	}

	/* Restore configuration - first, the fixed... */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
	/* Write PMXEVTYPER */
	__asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0]));

	/* ...then the configurable */
	for (i = 0; i < 4; i++) {
		/* Select counter with PMSELR.SEL */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
		/* Write PMXEVTYPER */
		__asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1]));
	}

	/* Restore enable state */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid]));

	/* Counter master re-enable */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}
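
/*
 * The save/restore pair above preserves the calling CPU's PMU state in the
 * per-CPU saved_* arrays, presumably around points where the core can lose
 * that state (idle/sleep).  Restore also replays any overflow recorded in
 * the saved PMOVSR, since no PMI will be delivered for it once the flag has
 * been saved away.
 */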

static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if ((classes & KPC_CLASS_FIXED_MASK) && kpc_controls_fixed_counters()) {
		/* update shadow counters */
		kpc_get_fixed_counters(&FIXED_SHADOW(0));

		/* set the new period */
		count = kpc_fixed_count();
		for (uint32_t i = 0; i < count; ++i) {
			if (*new_period == 0)
				*new_period = kpc_fixed_max();
			FIXED_RELOAD(i) = max - *new_period;
			/* reload the counter if possible */
			kpc_reload_counter(i);
			/* next period value */
			new_period++;
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters, this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0)
				continue;
			if (*new_period == 0)
				*new_period = kpc_configurable_max();
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (hw_atomic_sub(&kpc_reload_sync, 1) == 0)
		thread_wakeup((event_t) &kpc_reload_sync);
}
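
/*
 * Period convention: a requested period of 0 means "use the full counter
 * range" (it is replaced by kpc_fixed_max()/kpc_configurable_max()), and the
 * reload value is max - period.  For example, with a 32-bit counter
 * (max = 0xFFFFFFFF) and a period of 1000000, the counter is reloaded to
 * 0xFFF0BDBF and the next PMI arrives after about a million events.
 */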

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	for (uint32_t i = 0; i < cfg_count; ++i)
		if ((1ULL << i) & pmc_mask)
			*configv++ = get_counter_config(i + offset);

	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0)
			continue;
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);

	return 0;
}

static uint32_t kpc_config_sync;

static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0;

	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (hw_atomic_sub(&kpc_config_sync, 1) == 0)
		thread_wakeup((event_t) &kpc_config_sync);
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

static uint32_t kpc_xread_sync;

static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;
	int offset = 0, r = 0;

	assert(handler->buf);

	offset = cpu_number() * handler->buf_stride;
	r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	hw_atomic_add(&(handler->nb_counters), r);

	if (hw_atomic_sub(&kpc_xread_sync, 1) == 0)
		thread_wakeup((event_t) &kpc_xread_sync);
}
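
/*
 * Each CPU writes into its own buf_stride-sized slice of the shared buffer
 * (offset = cpu_number() * buf_stride), so the per-CPU reads need no
 * locking; nb_counters is the only value every CPU touches, and it is
 * updated with hw_atomic_add().
 */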

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	boolean_t enabled;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes),
		.buf = buf
	};

	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu)
		*curcpu = current_processor()->cpu_id;
	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_V2;
}

int
kpc_set_sw_inc( uint32_t mask )
{
	/* Only works with the configurable counters set to count the increment event (0x0) */

	/* Write to PMSWINC */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));

	return 0;
}
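
/*
 * PMSWINC only bumps counters whose PMXEVTYPER selects event 0x00
 * (software increment), which is what the comment above refers to: the
 * write is a no-op for counters configured to count anything else.
 */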

#else /* !ARMA7 */

/* No kpc support on this configuration; stub out the interface. */

uint32_t kpc_get_classes(void)                                      { return 0; }
uint32_t kpc_fixed_count(void)                                      { return 0; }
uint32_t kpc_configurable_count(void)                               { return 0; }
uint32_t kpc_fixed_config_count(void)                               { return 0; }
uint32_t kpc_configurable_config_count(uint64_t pmc_mask __unused)  { return 0; }
int kpc_get_fixed_config(kpc_config_t *configv __unused)            { return 0; }
uint64_t kpc_configurable_max(void)                                 { return 0; }
int kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused) { return 0; }
int kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)  { return 0; }
int kpc_get_fixed_counters(uint64_t *counterv __unused)             { return 0; }
boolean_t kpc_is_running_fixed(void)                                { return FALSE; }
boolean_t kpc_is_running_configurable(uint64_t pmc_mask __unused)   { return FALSE; }
int kpc_set_running_arch(struct kpc_running_remote *mp_config __unused) { return ENOTSUP; }
int kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)   { return ENOTSUP; }
int kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)   { return ENOTSUP; }

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
#pragma unused(classes)
#pragma unused(curcpu)
#pragma unused(buf)
	return 0;
}

int kpc_set_sw_inc( uint32_t mask __unused )                        { return ENOTSUP; }

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

#endif /* !ARMA7 */

/*
 * RAWPMU isn't implemented for any of the 32-bit ARMs.
 */
uint32_t kpc_rawpmu_config_count(void)                              { return 0; }
int kpc_get_rawpmu_config(__unused kpc_config_t *configv)           { return 0; }