X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/5ba3f43ea354af8ad55bea84372a2bc834d8757c..cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e:/osfmk/arm64/kpc.c

diff --git a/osfmk/arm64/kpc.c b/osfmk/arm64/kpc.c
index b1eae91fe..3a5a4d444 100644
--- a/osfmk/arm64/kpc.c
+++ b/osfmk/arm64/kpc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -37,10 +37,14 @@
 #include
 #include
 
+#if APPLE_ARM64_ARCH_FAMILY
+
 #if MONOTONIC
 #include
 #endif /* MONOTONIC */
 
+void kpc_pmi_handler(unsigned int ctr);
+
 /*
  * PMCs 8 and 9 were added to Hurricane and to maintain the existing bit
  * positions of the other PMCs, their configuration bits start at position 32.
@@ -48,7 +52,7 @@
 #define PMCR_PMC_8_9_OFFSET     (32)
 #define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
 #define PMCR_PMC_SHIFT(PMC)     (((PMC) <= 7) ? (PMC) : \
-                                PMCR_PMC_8_9_SHIFT(PMC))
+	PMCR_PMC_8_9_SHIFT(PMC))
 
 /*
  * PMCR0 controls enabling, interrupts, and overflow of performance counters.
@@ -131,9 +135,9 @@
 #endif
 
 #define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
-                                       PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
-                                       PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
-                                       PMCR1_EL3_A64_ENABLE_MASK(PMC))
+	PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
+	PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
+	PMCR1_EL3_A64_ENABLE_MASK(PMC))
 #define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))
 
 /* PMESR0 and PMESR1 are event selection registers */
@@ -178,12 +182,10 @@
 #define SREG_PMC8 "S3_2_c15_c9_0"
 #define SREG_PMC9 "S3_2_c15_c10_0"
-#if !defined(APPLECYCLONE)
 #define SREG_PMMMAP   "S3_2_c15_c15_0"
 #define SREG_PMTRHLD2 "S3_2_c15_c14_0"
 #define SREG_PMTRHLD4 "S3_2_c15_c13_0"
 #define SREG_PMTRHLD6 "S3_2_c15_c12_0"
-#endif
 
 /*
  * The low 8 bits of a configuration words select the event to program on
  * PMESR{0,1}.
@@ -206,8 +208,8 @@
  */
 #define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
 #define SREG_READ(SR)     ({ uint64_t VAL; \
-                          __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
-                          VAL; })
+	__asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
+	VAL; })
 
 /*
  * Configuration registers that can be controlled by RAWPMU:
@@ -215,11 +217,11 @@
  *
  * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
  * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
  */
-#if defined(APPLECYCLONE)
+#if HAS_EARLY_APPLE_CPMU
 #define RAWPMU_CONFIG_COUNT 7
-#else
+#else /* HAS_EARLY_APPLE_CPMU */
 #define RAWPMU_CONFIG_COUNT 11
-#endif
+#endif /* !HAS_EARLY_APPLE_CPMU */
 /* TODO: allocate dynamically */
 static uint64_t saved_PMCR[MAX_CPUS][2];
@@ -230,8 +232,6 @@ static uint64_t kpc_running_cfg_pmc_mask = 0;
 static uint32_t kpc_running_classes = 0;
 static uint32_t kpc_configured = 0;
 
-static int first_time = 1;
-
 /*
  * The whitelist is disabled by default on development/debug kernel. This can
  * be changed via the kpc.disable_whitelist sysctl. The whitelist is enabled on
@@ -243,90 +243,94 @@ static boolean_t whitelist_disabled = TRUE;
 static boolean_t whitelist_disabled = FALSE;
 #endif
 
-/* List of counter events that are allowed externally */
+#define CPMU_CORE_CYCLE 0x02
+
+#if HAS_EARLY_APPLE_CPMU
+
+#define CPMU_BIU_UPSTREAM_CYCLE   0x19
+#define CPMU_BIU_DOWNSTREAM_CYCLE 0x1a
+#define CPMU_L2C_AGENT_LD         0x22
+#define CPMU_L2C_AGENT_LD_MISS    0x23
+#define CPMU_L2C_AGENT_ST         0x24
+#define CPMU_L2C_AGENT_ST_MISS    0x25
+#define CPMU_INST_A32             0x78
+#define CPMU_INST_THUMB           0x79
+#define CPMU_INST_A64             0x7a
+#define CPMU_INST_BRANCH          0x7b
+#define CPMU_SYNC_DC_LOAD_MISS    0xb4
+#define CPMU_SYNC_DC_STORE_MISS   0xb5
+#define CPMU_SYNC_DTLB_MISS       0xb6
+#define CPMU_SYNC_ST_HIT_YNGR_LD  0xb9
+#define CPMU_SYNC_BR_ANY_MISP     0xc0
+#define CPMU_FED_IC_MISS_DEM      0xce
+#define CPMU_FED_ITLB_MISS        0xcf
+
+#else /* HAS_EARLY_APPLE_CPMU */
+
+#if HAS_CPMU_BIU_EVENTS
+#define CPMU_BIU_UPSTREAM_CYCLE   0x13
+#define CPMU_BIU_DOWNSTREAM_CYCLE 0x14
+#endif /* HAS_CPMU_BIU_EVENTS */
+
+#if HAS_CPMU_L2C_EVENTS
+#define CPMU_L2C_AGENT_LD         0x1a
+#define CPMU_L2C_AGENT_LD_MISS    0x1b
+#define CPMU_L2C_AGENT_ST         0x1c
+#define CPMU_L2C_AGENT_ST_MISS    0x1d
+#endif /* HAS_CPMU_L2C_EVENTS */
+
+#define CPMU_INST_A32             0x8a
+#define CPMU_INST_THUMB           0x8b
+#define CPMU_INST_A64             0x8c
+#define CPMU_INST_BRANCH          0x8d
+#define CPMU_SYNC_DC_LOAD_MISS    0xbf
+#define CPMU_SYNC_DC_STORE_MISS   0xc0
+#define CPMU_SYNC_DTLB_MISS       0xc1
+#define CPMU_SYNC_ST_HIT_YNGR_LD  0xc4
+#define CPMU_SYNC_BR_ANY_MISP     0xcb
+#define CPMU_FED_IC_MISS_DEM      0xd3
+#define CPMU_FED_ITLB_MISS        0xd4
+
+#endif /* !HAS_EARLY_APPLE_CPMU */
+
+/* List of counter events that are allowed to be used by 3rd-parties. */
 static kpc_config_t whitelist[] = {
-	0, /* NO_EVENT */
-
-#if defined(APPLECYCLONE)
-	0x02, /* CORE_CYCLE */
-	0x19, /* BIU_UPSTREAM_CYCLE */
-	0x1a, /* BIU_DOWNSTREAM_CYCLE */
-	0x22, /* L2C_AGENT_LD */
-	0x23, /* L2C_AGENT_LD_MISS */
-	0x24, /* L2C_AGENT_ST */
-	0x25, /* L2C_AGENT_ST_MISS */
-	0x78, /* INST_A32 */
-	0x79, /* INST_THUMB */
-	0x7a, /* INST_A64 */
-	0x7b, /* INST_BRANCH */
-	0xb4, /* SYNC_DC_LOAD_MISS */
-	0xb5, /* SYNC_DC_STORE_MISS */
-	0xb6, /* SYNC_DTLB_MISS */
-	0xb9, /* SYNC_ST_HIT_YNGR_LD */
-	0xc0, /* SYNC_BR_ANY_MISP */
-	0xce, /* FED_IC_MISS_DEM */
-	0xcf, /* FED_ITLB_MISS */
-
-#elif defined(APPLETYPHOON)
-	0x02, /* CORE_CYCLE */
-	0x13, /* BIU_UPSTREAM_CYCLE */
-	0x14, /* BIU_DOWNSTREAM_CYCLE */
-	0x1a, /* L2C_AGENT_LD */
-	0x1b, /* L2C_AGENT_LD_MISS */
-	0x1c, /* L2C_AGENT_ST */
-	0x1d, /* L2C_AGENT_ST_MISS */
-	0x8a, /* INST_A32 */
-	0x8b, /* INST_THUMB */
-	0x8c, /* INST_A64 */
-	0x8d, /* INST_BRANCH */
-	0xbf, /* SYNC_DC_LOAD_MISS */
-	0xc0, /* SYNC_DC_STORE_MISS */
-	0xc1, /* SYNC_DTLB_MISS */
-	0xc4, /* SYNC_ST_HIT_YNGR_LD */
-	0xcb, /* SYNC_BR_ANY_MISP */
-	0xd3, /* FED_IC_MISS_DEM */
-	0xd4, /* FED_ITLB_MISS */
-
-#elif defined(APPLETWISTER) || defined(APPLEHURRICANE)
-	0x02, /* CORE_CYCLE */
-	0x1a, /* L2C_AGENT_LD */
-	0x1b, /* L2C_AGENT_LD_MISS */
-	0x1c, /* L2C_AGENT_ST */
-	0x1d, /* L2C_AGENT_ST_MISS */
-	0x8a, /* INST_A32 */
-	0x8b, /* INST_THUMB */
-	0x8c, /* INST_A64 */
-	0x8d, /* INST_BRANCH */
-	0xbf, /* SYNC_DC_LOAD_MISS */
-	0xc0, /* SYNC_DC_STORE_MISS */
-	0xc1, /* SYNC_DTLB_MISS */
-	0xc4, /* SYNC_ST_HIT_YNGR_LD */
-	0xcb, /* SYNC_BR_ANY_MISP */
-	0xd3, /* FED_IC_MISS_DEM */
-	0xd4, /* FED_ITLB_MISS */
+	0, /* NO_EVENT */
 
-#else
-	/* An unknown CPU gets a trivial { NO_EVENT } whitelist. */
-#endif
+	CPMU_CORE_CYCLE,
+
+#if HAS_CPMU_BIU_EVENTS
+	CPMU_BIU_UPSTREAM_CYCLE, CPMU_BIU_DOWNSTREAM_CYCLE,
+#endif /* HAS_CPMU_BIU_EVENTS */
+
+#if HAS_CPMU_L2C_EVENTS
+	CPMU_L2C_AGENT_LD, CPMU_L2C_AGENT_LD_MISS, CPMU_L2C_AGENT_ST,
+	CPMU_L2C_AGENT_ST_MISS,
+#endif /* HAS_CPMU_L2C_EVENTS */
+
+	CPMU_INST_A32, CPMU_INST_THUMB, CPMU_INST_A64, CPMU_INST_BRANCH,
+	CPMU_SYNC_DC_LOAD_MISS, CPMU_SYNC_DC_STORE_MISS,
+	CPMU_SYNC_DTLB_MISS, CPMU_SYNC_ST_HIT_YNGR_LD,
+	CPMU_SYNC_BR_ANY_MISP, CPMU_FED_IC_MISS_DEM, CPMU_FED_ITLB_MISS,
 };
-#define WHITELIST_COUNT (sizeof(whitelist)/sizeof(*whitelist))
+#define WHITELIST_COUNT (sizeof(whitelist) / sizeof(whitelist[0]))
+#define EVENT_MASK 0xff
 
-static boolean_t
+static bool
 config_in_whitelist(kpc_config_t cfg)
 {
-	unsigned int i;
-
-	for (i = 0; i < WHITELIST_COUNT; i++) {
-		if (cfg == whitelist[i]) {
-			return TRUE;
+	for (unsigned int i = 0; i < WHITELIST_COUNT; i++) {
+		/* Strip off any EL configuration bits -- just look at the event. */
+		if ((cfg & EVENT_MASK) == whitelist[i]) {
+			return true;
 		}
 	}
-
-	return FALSE;
+	return false;
 }
 
 #ifdef KPC_DEBUG
-static void dump_regs(void)
+static void
+dump_regs(void)
 {
 	uint64_t val;
 	kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR0));
@@ -454,19 +458,19 @@
 static uint64_t
 read_counter(uint32_t counter)
 {
 	switch (counter) {
-		// case 0: return SREG_READ(SREG_PMC0);
-		// case 1: return SREG_READ(SREG_PMC1);
-		case 2: return SREG_READ(SREG_PMC2);
-		case 3: return SREG_READ(SREG_PMC3);
-		case 4: return SREG_READ(SREG_PMC4);
-		case 5: return SREG_READ(SREG_PMC5);
-		case 6: return SREG_READ(SREG_PMC6);
-		case 7: return SREG_READ(SREG_PMC7);
+	// case 0: return SREG_READ(SREG_PMC0);
+	// case 1: return SREG_READ(SREG_PMC1);
+	case 2: return SREG_READ(SREG_PMC2);
+	case 3: return SREG_READ(SREG_PMC3);
+	case 4: return SREG_READ(SREG_PMC4);
+	case 5: return SREG_READ(SREG_PMC5);
+	case 6: return SREG_READ(SREG_PMC6);
+	case 7: return SREG_READ(SREG_PMC7);
 #if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
-		case 8: return SREG_READ(SREG_PMC8);
-		case 9: return SREG_READ(SREG_PMC9);
+	case 8: return SREG_READ(SREG_PMC8);
+	case 9: return SREG_READ(SREG_PMC9);
 #endif
-		default: return 0;
+	default: return 0;
 	}
 }
@@ -474,19 +478,19 @@
 static void
 write_counter(uint32_t counter, uint64_t value)
 {
 	switch (counter) {
-		// case 0: SREG_WRITE(SREG_PMC0, value); break;
-		// case 1: SREG_WRITE(SREG_PMC1, value); break;
-		case 2: SREG_WRITE(SREG_PMC2, value); break;
-		case 3: SREG_WRITE(SREG_PMC3, value); break;
-		case 4: SREG_WRITE(SREG_PMC4, value); break;
-		case 5: SREG_WRITE(SREG_PMC5, value); break;
-		case 6: SREG_WRITE(SREG_PMC6, value); break;
-		case 7: SREG_WRITE(SREG_PMC7, value); break;
+	// case 0: SREG_WRITE(SREG_PMC0, value); break;
+	// case 1: SREG_WRITE(SREG_PMC1, value); break;
+	case 2: SREG_WRITE(SREG_PMC2, value); break;
+	case 3: SREG_WRITE(SREG_PMC3, value); break;
+	case 4: SREG_WRITE(SREG_PMC4, value); break;
+	case 5: SREG_WRITE(SREG_PMC5, value); break;
+	case 6: SREG_WRITE(SREG_PMC6, value); break;
+	case 7: SREG_WRITE(SREG_PMC7, value); break;
 #if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
-		case 8: SREG_WRITE(SREG_PMC8, value); break;
-		case 9: SREG_WRITE(SREG_PMC9, value); break;
+	case 8: SREG_WRITE(SREG_PMC8, value); break;
+	case 9: SREG_WRITE(SREG_PMC9, value); break;
 #endif
-		default: break;
+	default: break;
 	}
 }
@@ -539,7 +543,7 @@ save_regs(void)
 {
 	int cpuid = cpu_number();
 
-	__asm__ volatile("dmb ish");
+	__asm__ volatile ("dmb ish");
 
 	assert(ml_get_interrupts_enabled() == FALSE);
 
@@ -588,24 +592,24 @@ get_counter_config(uint32_t counter)
 	uint64_t pmesr;
 
 	switch (counter) {
-		case 2: /* FALLTHROUGH */
-		case 3: /* FALLTHROUGH */
-		case 4: /* FALLTHROUGH */
-		case 5:
-			pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2);
-			break;
-		case 6: /* FALLTHROUGH */
-		case 7:
+	case 2: /* FALLTHROUGH */
+	case 3: /* FALLTHROUGH */
+	case 4: /* FALLTHROUGH */
+	case 5:
+		pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2);
+		break;
+	case 6: /* FALLTHROUGH */
+	case 7:
 #if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
-		/* FALLTHROUGH */
-		case 8: /* FALLTHROUGH */
-		case 9:
+	/* FALLTHROUGH */
+	case 8: /* FALLTHROUGH */
+	case 9:
 #endif
-			pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6);
-			break;
-		default:
-			pmesr = 0;
-			break;
+		pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6);
+		break;
+	default:
+		pmesr = 0;
+		break;
 	}
 
 	kpc_config_t config = pmesr;
@@ -640,32 +644,32 @@ set_counter_config(uint32_t counter, uint64_t config)
 	uint64_t pmesr = 0;
 
 	switch (counter) {
-		case 2: /* FALLTHROUGH */
-		case 3: /* FALLTHROUGH */
-		case 4: /* FALLTHROUGH */
-		case 5:
-			pmesr = SREG_READ(SREG_PMESR0);
-			pmesr &= PMESR_EVT_CLEAR(counter, 2);
-			pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
-			SREG_WRITE(SREG_PMESR0, pmesr);
-			saved_PMESR[cpuid][0] = pmesr;
-			break;
-
-		case 6: /* FALLTHROUGH */
-		case 7:
+	case 2: /* FALLTHROUGH */
+	case 3: /* FALLTHROUGH */
+	case 4: /* FALLTHROUGH */
+	case 5:
+		pmesr = SREG_READ(SREG_PMESR0);
+		pmesr &= PMESR_EVT_CLEAR(counter, 2);
+		pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
+		SREG_WRITE(SREG_PMESR0, pmesr);
+		saved_PMESR[cpuid][0] = pmesr;
+		break;
+
+	case 6: /* FALLTHROUGH */
+	case 7:
 #if KPC_ARM64_CONFIGURABLE_COUNT > 6
-		/* FALLTHROUGH */
-		case 8: /* FALLTHROUGH */
-		case 9:
+	/* FALLTHROUGH */
+	case 8: /* FALLTHROUGH */
+	case 9:
 #endif
-			pmesr = SREG_READ(SREG_PMESR1);
-			pmesr &= PMESR_EVT_CLEAR(counter, 6);
-			pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
-			SREG_WRITE(SREG_PMESR1, pmesr);
-			saved_PMESR[cpuid][1] = pmesr;
-			break;
-		default:
-			break;
+		pmesr = SREG_READ(SREG_PMESR1);
+		pmesr &= PMESR_EVT_CLEAR(counter, 6);
+		pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
+		SREG_WRITE(SREG_PMESR1, pmesr);
+		saved_PMESR[cpuid][1] = pmesr;
+		break;
+	default:
+		break;
 	}
 
 	set_modes(counter, config);
@@ -744,8 +748,9 @@ set_running_configurable(uint64_t target_mask, uint64_t state_mask)
 	enabled = ml_set_interrupts_enabled(FALSE);
 
 	for (uint32_t i = 0; i < cfg_count; ++i) {
-		if (((1ULL << i) & target_mask) == 0)
+		if (((1ULL << i) & target_mask) == 0) {
 			continue;
+		}
 
 		assert(kpc_controls_counter(offset + i));
 		if ((1ULL << i) & state_mask) {
@@ -766,10 +771,11 @@ kpc_set_running_xcall( void *vstate )
 	assert(mp_config);
 
 	set_running_configurable(mp_config->cfg_target_mask,
-				 mp_config->cfg_state_mask);
+	    mp_config->cfg_state_mask);
 
-	if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0)
+	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
 		thread_wakeup((event_t) &kpc_xcall_sync);
+	}
 }
 
 static uint32_t kpc_xread_sync;
@@ -785,9 +791,9 @@ kpc_get_curcpu_counters_xcall(void *args)
 	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);
 
 	/* number of counters added by this CPU, needs to be atomic */
-	hw_atomic_add(&(handler->nb_counters), r);
+	os_atomic_add(&(handler->nb_counters), r, relaxed);
 
-	if (hw_atomic_sub(&kpc_xread_sync, 1) == 0) {
+	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
 		thread_wakeup((event_t) &kpc_xread_sync);
 	}
 }
@@ -840,17 +846,18 @@ kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
 	assert(counterv);
 
 	for (uint32_t i = 0; i < cfg_count; ++i) {
-		if (((1ULL << i) & pmc_mask) == 0)
+		if (((1ULL << i) & pmc_mask) == 0) {
 			continue;
+		}
 
 		ctr = read_counter(i + offset);
 		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
 			ctr = CONFIGURABLE_SHADOW(i) +
-				(kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
-				(ctr & KPC_ARM64_COUNTER_MASK);
+			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
+			    (ctr & KPC_ARM64_COUNTER_MASK);
 		} else {
 			ctr = CONFIGURABLE_SHADOW(i) +
-				(ctr - CONFIGURABLE_RELOAD(i));
+			    (ctr - CONFIGURABLE_RELOAD(i));
 		}
 
 		*counterv++ = ctr;
@@ -866,9 +873,11 @@ kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
 
 	assert(configv);
 
-	for (uint32_t i = 0; i < cfg_count; ++i)
-		if ((1ULL << i) & pmc_mask)
+	for (uint32_t i = 0; i < cfg_count; ++i) {
+		if ((1ULL << i) & pmc_mask) {
 			*configv++ = get_counter_config(i + offset);
+		}
+	}
 
 	return 0;
 }
@@ -883,8 +892,9 @@ kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
 	enabled = ml_set_interrupts_enabled(FALSE);
 
 	for (uint32_t i = 0; i < cfg_count; ++i) {
-		if (((1ULL << i) & pmc_mask) == 0)
+		if (((1ULL << i) & pmc_mask) == 0) {
 			continue;
+		}
 
 		assert(kpc_controls_counter(i + offset));
 		set_counter_config(i + offset, *configv++);
@@ -918,8 +928,9 @@ kpc_set_config_xcall(void *vmp_config)
 		new_config += RAWPMU_CONFIG_COUNT;
 	}
 
-	if (hw_atomic_sub(&kpc_config_sync, 1) == 0)
+	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
 		thread_wakeup((event_t) &kpc_config_sync);
+	}
 }
 
 static uint64_t
@@ -927,13 +938,19 @@ kpc_reload_counter(uint32_t ctr)
 {
 	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));
 
-	/* don't reload counters reserved for power management */
-	if (!kpc_controls_counter(ctr))
-		return 0ULL;
-
 	uint64_t old = read_counter(ctr);
-	write_counter(ctr, FIXED_RELOAD(ctr));
-	return old & KPC_ARM64_COUNTER_MASK;
+
+	if (kpc_controls_counter(ctr)) {
+		write_counter(ctr, FIXED_RELOAD(ctr));
+		return old & KPC_ARM64_COUNTER_MASK;
+	} else {
+		/*
+		 * Unset the overflow bit to clear the condition that drives
+		 * PMIs. The power manager is not interested in handling PMIs.
+		 */
+		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
+		return 0;
+	}
 }
 
 static uint32_t kpc_reload_sync;
@@ -966,10 +983,12 @@ kpc_set_reload_xcall(void *vmp_config)
 	count = kpc_configurable_count();
 	for (uint32_t i = 0; i < count; ++i) {
 		/* ignore the counter */
-		if (((1ULL << i) & mp_config->pmc_mask) == 0)
+		if (((1ULL << i) & mp_config->pmc_mask) == 0) {
 			continue;
-		if (*new_period == 0)
+		}
+		if (*new_period == 0) {
 			*new_period = kpc_configurable_max();
+		}
 		CONFIGURABLE_RELOAD(i) = max - *new_period;
 		/* reload the counter */
 		kpc_reload_counter(offset + i);
@@ -980,47 +999,21 @@ kpc_set_reload_xcall(void *vmp_config)
 
 	ml_set_interrupts_enabled(enabled);
 
-	if (hw_atomic_sub(&kpc_reload_sync, 1) == 0)
+	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
 		thread_wakeup((event_t) &kpc_reload_sync);
+	}
 }
 
-void kpc_pmi_handler(cpu_id_t source);
 void
-kpc_pmi_handler(cpu_id_t source __unused)
+kpc_pmi_handler(unsigned int ctr)
 {
-	uint64_t PMSR, extra;
-	int ctr;
-	int enabled;
-
-	enabled = ml_set_interrupts_enabled(FALSE);
-
-	/* The pmi must be delivered to the CPU that generated it */
-	if (source != getCpuDatap()->interrupt_nub) {
-		panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
-	}
-
-	/* Get the PMSR which has the overflow bits for all the counters */
-	__asm__ volatile("mrs %0, S3_1_c15_c13_0" : "=r"(PMSR));
+	uint64_t extra = kpc_reload_counter(ctr);
 
-	for (ctr = 0; ctr < (KPC_ARM64_FIXED_COUNT + KPC_ARM64_CONFIGURABLE_COUNT); ctr++) {
-		if ((1ull << ctr) & PMSR) {
-			if (ctr < 2) {
-#if MONOTONIC
-				mt_cpu_pmi(getCpuDatap(), PMSR);
-#endif /* MONOTONIC */
-			} else {
-				extra = kpc_reload_counter(ctr);
-
-				FIXED_SHADOW(ctr)
-					+= (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
+	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
 
-				if (FIXED_ACTIONID(ctr))
-					kpc_sample_kperf(FIXED_ACTIONID(ctr));
-			}
-		}
+	if (FIXED_ACTIONID(ctr)) {
+		kpc_sample_kperf(FIXED_ACTIONID(ctr));
 	}
-
-	ml_set_interrupts_enabled(enabled);
 }
 
 uint32_t
@@ -1032,20 +1025,7 @@ kpc_get_classes(void)
 int
 kpc_set_running_arch(struct kpc_running_remote *mp_config)
 {
-	int cpu;
-
-	assert(mp_config);
-
-	if (first_time) {
-		PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler);
-		int max_cpu = ml_get_max_cpu_number();
-		for (cpu = 0; cpu <= max_cpu; cpu++) {
-			cpu_data_t *target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
-			if (target_cpu_datap != NULL)
-				PE_cpu_perfmon_interrupt_enable(target_cpu_datap->cpu_id, TRUE);
-		}
-		first_time = 0;
-	}
+	assert(mp_config != NULL);
 
 	/* dispatch to all CPUs */
 	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);
@@ -1093,7 +1073,7 @@ kpc_set_config_arch(struct kpc_config_remote *mp_config)
 	return 0;
 }
 
-void 
+void
 kpc_idle(void)
 {
 	if (kpc_configured) {
@@ -1101,8 +1081,8 @@ kpc_idle(void)
 	}
 }
 
-void 
-kpc_idle_exit(void) 
+void
+kpc_idle_exit(void)
 {
 	if (kpc_configured) {
 		restore_regs();
@@ -1133,3 +1113,165 @@ kpc_get_pmu_version(void)
 {
 	return KPC_PMU_ARM_APPLE;
 }
+
+#else /* APPLE_ARM64_ARCH_FAMILY */
+
+/* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */
+
+void
+kpc_arch_init(void)
+{
+	/* No-op */
+}
+
+uint32_t
+kpc_get_classes(void)
+{
+	return 0;
+}
+
+uint32_t
+kpc_fixed_count(void)
+{
+	return 0;
+}
+
+uint32_t
+kpc_configurable_count(void)
+{
+	return 0;
+}
+
+uint32_t
+kpc_fixed_config_count(void)
+{
+	return 0;
+}
+
+uint32_t
+kpc_configurable_config_count(uint64_t pmc_mask __unused)
+{
+	return 0;
+}
+
+int
+kpc_get_fixed_config(kpc_config_t *configv __unused)
+{
+	return 0;
+}
+
+uint64_t
+kpc_fixed_max(void)
+{
+	return 0;
+}
+
+uint64_t
+kpc_configurable_max(void)
+{
+	return 0;
+}
+
+int
+kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
+{
+	return ENOTSUP;
+}
+
+int
+kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
+{
+	return ENOTSUP;
+}
+
+int
+kpc_get_fixed_counters(uint64_t *counterv __unused)
+{
+	return 0;
+}
+
+boolean_t
+kpc_is_running_fixed(void)
+{
+	return FALSE;
+}
+
+boolean_t
+kpc_is_running_configurable(uint64_t pmc_mask __unused)
+{
+	return FALSE;
+}
+
+int
+kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
+{
+	return ENOTSUP;
+}
+
+int
+kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
+{
+	return ENOTSUP;
+}
+
+int
+kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
+{
+	return ENOTSUP;
+}
+
+void
+kpc_idle(void)
+{
+	// do nothing
+}
+
+void
+kpc_idle_exit(void)
+{
+	// do nothing
+}
+
+int
+kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
+{
+	return 0;
+}
+
+int
+kpc_set_sw_inc( uint32_t mask __unused )
+{
+	return ENOTSUP;
+}
+
+int
+kpc_get_pmu_version(void)
+{
+	return KPC_PMU_ERROR;
+}
+
+uint32_t
+kpc_rawpmu_config_count(void)
+{
+	return 0;
+}
+
+int
+kpc_get_rawpmu_config(__unused kpc_config_t *configv)
+{
+	return 0;
+}
+
+int
+kpc_disable_whitelist( int val __unused )
+{
+	return 0;
+}
+
+int
+kpc_get_whitelist_disabled( void )
+{
+	return 0;
+}
+
+#endif /* !APPLE_ARM64_ARCH_FAMILY */
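The wrap arithmetic that appears both in kpc_get_configurable_counters() and in the new kpc_pmi_handler() folds a wrapping hardware PMC into a monotonic 64-bit total: each counter is preloaded with a reload value so it overflows (and raises a PMI) after max - reload + 1 events, and the accumulated total lives in a software shadow. The stand-alone C sketch below models only that bookkeeping; the names and the 47-bit counter width are assumptions for illustration, not the kernel's own symbols or macros.

#include <stdint.h>

/* Assumed layout: value in bits 0..46, sticky overflow flag in bit 47. */
#define COUNTER_OVF_BIT   47
#define COUNTER_MASK      ((UINT64_C(1) << COUNTER_OVF_BIT) - 1)  /* value bits */
#define COUNTER_OVF_MASK  (UINT64_C(1) << COUNTER_OVF_BIT)        /* overflow flag */
#define COUNTER_MAX       COUNTER_MASK                            /* stand-in for kpc_*_max() */

/*
 * Fold a raw PMC read into a monotonically increasing 64-bit total.
 * `shadow` is the total accumulated up to the last reload; `reload` is the
 * value the PMC was preloaded with, so it wraps after COUNTER_MAX - reload + 1
 * further events.
 */
static uint64_t
accumulate_counter(uint64_t shadow, uint64_t reload, uint64_t raw)
{
	if (raw & COUNTER_OVF_MASK) {
		/* Wrapped: events consumed reaching the top, plus events counted since. */
		return shadow + (COUNTER_MAX - reload + 1 /* Wrap */) + (raw & COUNTER_MASK);
	}
	/* No wrap: only the delta since the last reload. */
	return shadow + (raw - reload);
}

The PMI path uses the same identity: kpc_reload_counter() returns the masked raw value as `extra`, and the handler adds the wrap term plus `extra` to the shadow before re-arming the counter at its reload value.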
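For the event-selection side, get_counter_config() and set_counter_config() route counters 2-5 through PMESR0 and counters 6-9 through PMESR1, and the comment in the patch says the low 8 bits of a configuration word select the event. The PMESR_EVT_* macros themselves are not part of any hunk shown here, so the helpers below are only an illustrative model of that packing (one 8-bit selector per counter, offset by the register's first counter), not the kernel's definitions.

#include <stdint.h>

/* `base` is the first counter carried by the register: 2 for PMESR0, 6 for PMESR1. */
static inline unsigned int
pmesr_shift(uint32_t counter, uint32_t base)
{
	return 8 * (counter - base);
}

/* Extract the event currently programmed for `counter`. */
static inline uint64_t
pmesr_decode(uint64_t pmesr, uint32_t counter, uint32_t base)
{
	return (pmesr >> pmesr_shift(counter, base)) & 0xff;
}

/* Clear the old event for `counter` and program the low 8 bits of `config`. */
static inline uint64_t
pmesr_encode(uint64_t pmesr, uint64_t config, uint32_t counter, uint32_t base)
{
	pmesr &= ~(UINT64_C(0xff) << pmesr_shift(counter, base));
	pmesr |= (config & 0xff) << pmesr_shift(counter, base);
	return pmesr;
}

Under this model, programming event 0x8c on counter 7 would read PMESR1, clear bits 8-15, and write 0x8c into them, which mirrors the read/clear/encode/write sequence visible in set_counter_config().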