diff --git a/osfmk/x86_64/kpc_x86.c b/osfmk/x86_64/kpc_x86.c
index f24bbfa31..fcd3a54c7 100644
--- a/osfmk/x86_64/kpc_x86.c
+++ b/osfmk/x86_64/kpc_x86.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,14 +22,13 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -43,10 +42,15 @@
 #include 
 #include 
 
+#include 
+
 /* Fixed counter mask -- three counters, each with OS and USER */
 #define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333)
 #define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888)
 
+#define IA32_PERFEVT_USER_EN (0x10000)
+#define IA32_PERFEVT_OS_EN (0x20000)
+
 #define IA32_PERFEVTSEL_PMI (1ull << 20)
 #define IA32_PERFEVTSEL_EN (1ull << 22)
@@ -67,16 +71,6 @@ IA32_FIXED_CTR_CTRL(void)
 	return rdmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL );
 }
 
-static uint64_t
-IA32_FIXED_CTRx(uint32_t ctr)
-{
-#ifdef USE_RDPMC
-	return rdpmc64(RDPMC_FIXED_COUNTER_SELECTOR | ctr);
-#else /* !USE_RDPMC */
-	return rdmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr);
-#endif /* !USE_RDPMC */
-}
-
 #ifdef FIXED_COUNTER_RELOAD
 static void
 wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
@@ -133,7 +127,7 @@ kpc_is_running_configurable(uint64_t pmc_mask)
 uint32_t
 kpc_fixed_count(void)
 {
-	i386_cpu_info_t	*info = NULL;
+	i386_cpu_info_t *info = NULL;
 	info = cpuid_info();
 	return info->cpuid_arch_perf_leaf.fixed_number;
 }
@@ -141,7 +135,7 @@ kpc_fixed_count(void)
 uint32_t
 kpc_configurable_count(void)
 {
-	i386_cpu_info_t	*info = NULL;
+	i386_cpu_info_t *info = NULL;
 	info = cpuid_info();
 	return info->cpuid_arch_perf_leaf.number;
 }
@@ -175,8 +169,8 @@ kpc_get_rawpmu_config(__unused kpc_config_t *configv)
 static uint8_t
 kpc_fixed_width(void)
 {
-	i386_cpu_info_t	*info = NULL;
-	
+	i386_cpu_info_t *info = NULL;
+
 	info = cpuid_info();
 
 	return info->cpuid_arch_perf_leaf.fixed_width;
@@ -185,7 +179,7 @@ kpc_fixed_width(void)
 static uint8_t
 kpc_configurable_width(void)
 {
-	i386_cpu_info_t	*info = NULL;
+	i386_cpu_info_t *info = NULL;
 
 	info = cpuid_info();
 
@@ -227,7 +221,7 @@ kpc_reload_configurable(int ctr)
 	return old;
 }
 
-void kpc_pmi_handler(x86_saved_state_t *state);
+void kpc_pmi_handler(void);
 
 static void
 set_running_fixed(boolean_t on)
@@ -236,12 +230,13 @@ set_running_fixed(boolean_t on)
 	int i;
 	boolean_t enabled;
 
-	if( on )
+	if (on) {
 		/* these are per-thread in SMT */
 		fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI;
-	else
+	} else {
 		/* don't allow disabling fixed counters */
 		return;
+	}
 
 	wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl );
 
@@ -249,13 +244,15 @@ set_running_fixed(boolean_t on)
 	/* rmw the global control */
 	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
 
-	for( i = 0; i < (int) kpc_fixed_count(); i++ )
-		mask |= (1ULL<<(32+i));
+	for (i = 0; i < (int) kpc_fixed_count(); i++) {
+		mask |= (1ULL << (32 + i));
+	}
 
-	if( on )
+	if (on) {
 		global |= mask;
-	else
+	} else {
 		global &= ~mask;
+	}
 
 	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);
 
@@ -283,8 +280,8 @@ set_running_configurable(uint64_t target_mask, uint64_t state_mask)
 	}
 
 	/* update the global control value */
-	global &= ~target_mask; /* clear the targeted PMCs bits */
-	global |= state_mask;	/* update the targeted PMCs bits with their new states */
+	global &= ~target_mask; /* clear the targeted PMCs bits */
+	global |= state_mask;   /* update the targeted PMCs bits with their new states */
 	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);
 
 	ml_set_interrupts_enabled(enabled);
@@ -296,11 +293,12 @@ kpc_set_running_mp_call( void *vstate )
 	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
 	assert(mp_config);
 
-	if (kpc_controls_fixed_counters())
+	if (kpc_controls_fixed_counters()) {
 		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
+	}
 
 	set_running_configurable(mp_config->cfg_target_mask,
-				 mp_config->cfg_state_mask);
+	    mp_config->cfg_state_mask);
 }
 
 int
@@ -322,35 +320,13 @@ kpc_set_fixed_config(kpc_config_t *configv)
 int
 kpc_get_fixed_counters(uint64_t *counterv)
 {
-	int i, n = kpc_fixed_count();
-
-#ifdef FIXED_COUNTER_SHADOW
-	uint64_t status;
-
-	/* snap the counters */
-	for( i = 0; i < n; i++ ) {
-		counterv[i] = FIXED_SHADOW(ctr) +
-			(IA32_FIXED_CTRx(i) - FIXED_RELOAD(ctr));
-	}
-
-	/* Grab the overflow bits */
-	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);
-
-	/* If the overflow bit is set for a counter, our previous read may or may not have been
-	 * before the counter overflowed. Re-read any counter with it's overflow bit set so
-	 * we know for sure that it has overflowed. The reason this matters is that the math
-	 * is different for a counter that has overflowed. */
-	for( i = 0; i < n; i++ ) {
-		if ((1ull << (i + 32)) & status)
-			counterv[i] = FIXED_SHADOW(ctr) +
-				(kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + IA32_FIXED_CTRx(i);
-	}
-#else
-	for( i = 0; i < n; i++ )
-		counterv[i] = IA32_FIXED_CTRx(i);
-#endif
-
+#if MONOTONIC
+	mt_fixed_counts(counterv);
 	return 0;
+#else /* MONOTONIC */
+#pragma unused(counterv)
+	return ENOTSUP;
+#endif /* !MONOTONIC */
 }
 
 int
@@ -360,9 +336,11 @@ kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
 
 	assert(configv);
 
-	for (uint32_t i = 0; i < cfg_count; ++i)
-		if ((1ULL << i) & pmc_mask)
+	for (uint32_t i = 0; i < cfg_count; ++i) {
+		if ((1ULL << i) & pmc_mask) {
 			*configv++ = IA32_PERFEVTSELx(i);
+		}
+	}
 
 	return 0;
 }
@@ -372,9 +350,10 @@ kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
 	uint32_t cfg_count = kpc_configurable_count();
 	uint64_t save;
 
-	for (uint32_t i = 0; i < cfg_count; i++ ) {
-		if (((1ULL << i) & pmc_mask) == 0)
+	for (uint32_t i = 0; i < cfg_count; i++) {
+		if (((1ULL << i) & pmc_mask) == 0) {
 			continue;
+		}
 
 		/* need to save and restore counter since it resets when reconfigured */
 		save = IA32_PMCx(i);
@@ -421,7 +400,7 @@ kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
 	for (uint32_t i = 0; i < cfg_count; ++i) {
 		if ((1ULL << i) & pmc_mask) {
 			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
-				(IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
+			    (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
 		}
 	}
 
@@ -439,10 +418,9 @@ kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
 	 */
 	for (uint32_t i = 0; i < cfg_count; ++i) {
 		if (((1ULL << i) & pmc_mask) &&
-		    ((1ULL << i) & status))
-		{
+		    ((1ULL << i) & status)) {
 			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
-				(kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
+			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
 		}
 	}
 
@@ -453,7 +431,7 @@ static void
 kpc_get_curcpu_counters_mp_call(void *args)
 {
 	struct kpc_get_counters_remote *handler = args;
-	int offset=0, r=0;
+	int offset = 0, r = 0;
 
 	assert(handler);
 	assert(handler->buf);
@@ -462,7 +440,7 @@ kpc_get_curcpu_counters_mp_call(void *args)
 	r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);
 
 	/* number of counters added by this CPU, needs to be atomic */
-	hw_atomic_add(&(handler->nb_counters), r);
+	os_atomic_add(&(handler->nb_counters), r, relaxed);
 }
 
 int
@@ -479,8 +457,9 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
 
 	enabled = ml_set_interrupts_enabled(FALSE);
 
-	if (curcpu)
-		*curcpu = current_processor()->cpu_id;
+	if (curcpu) {
+		*curcpu = cpu_number();
+	}
 	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl);
 
 	ml_set_interrupts_enabled(enabled);
@@ -491,7 +470,6 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
 static void
 kpc_set_config_mp_call(void *vmp_config)
 {
-
 	struct kpc_config_remote *mp_config = vmp_config;
 	kpc_config_t *new_config = NULL;
 	uint32_t classes = 0, count = 0;
@@ -503,9 +481,8 @@ kpc_set_config_mp_call(void *vmp_config)
 	new_config = mp_config->configv;
 
 	enabled = ml_set_interrupts_enabled(FALSE);
-	
-	if (classes & KPC_CLASS_FIXED_MASK)
-	{
+
+	if (classes & KPC_CLASS_FIXED_MASK) {
 		kpc_set_fixed_config(&new_config[count]);
 		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
 	}
@@ -547,11 +524,13 @@ kpc_set_reload_mp_call(void *vmp_config)
 	count = kpc_configurable_count();
 	for (uint32_t i = 0; i < count; ++i) {
 		/* ignore the counter */
-		if (((1ULL << i) & mp_config->pmc_mask) == 0)
+		if (((1ULL << i) & mp_config->pmc_mask) == 0) {
 			continue;
+		}
 
-		if (*new_period == 0)
+		if (*new_period == 0) {
 			*new_period = kpc_configurable_max();
+		}
 
 		CONFIGURABLE_RELOAD(i) = max - *new_period;
 
@@ -621,8 +600,55 @@ kpc_set_config_arch(struct kpc_config_remote *mp_config)
 	return 0;
 }
 
-/* PMI stuff */
-void kpc_pmi_handler(__unused x86_saved_state_t *state)
+static uintptr_t
+get_interrupted_pc(bool *kernel_out)
+{
+	x86_saved_state_t *state = current_cpu_datap()->cpu_int_state;
+	if (!state) {
+		return 0;
+	}
+
+	bool state_64 = is_saved_state64(state);
+	uint64_t cs;
+	if (state_64) {
+		cs = saved_state64(state)->isf.cs;
+	} else {
+		cs = saved_state32(state)->cs;
+	}
+	bool kernel = (cs & SEL_PL) != SEL_PL_U;
+	*kernel_out = kernel;
+
+	uintptr_t pc = 0;
+	if (state_64) {
+		pc = saved_state64(state)->isf.rip;
+	} else {
+		pc = saved_state32(state)->eip;
+	}
+	if (kernel) {
+		pc = VM_KERNEL_UNSLIDE(pc);
+	}
+	return pc;
+}
+
+static void
+kpc_sample_kperf_x86(uint32_t ctr, uint32_t actionid, uint64_t count,
+    uint64_t config)
+{
+	bool kernel = false;
+	uintptr_t pc = get_interrupted_pc(&kernel);
+	kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
+	if ((config) & IA32_PERFEVT_USER_EN) {
+		flags |= KPC_USER_COUNTING;
+	}
+	if ((config) & IA32_PERFEVT_OS_EN) {
+		flags |= KPC_KERNEL_COUNTING;
+	}
+	kpc_sample_kperf(actionid, ctr,
+	    config & 0xffff /* just the number and umask */, count, pc, flags);
+}
+
+void
+kpc_pmi_handler(void)
 {
 	uint64_t status, extra;
 	uint32_t ctr;
@@ -638,31 +664,37 @@ void kpc_pmi_handler(__unused x86_saved_state_t *state)
 			extra = kpc_reload_fixed(ctr);
 
 			FIXED_SHADOW(ctr)
-				+= (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
+			    += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
 
-			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr));
+			uint32_t actionid = FIXED_ACTIONID(ctr);
+			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, actionid);
 
-			if (FIXED_ACTIONID(ctr))
-				kpc_sample_kperf(FIXED_ACTIONID(ctr));
+			if (actionid != 0) {
+				kpc_sample_kperf_x86(ctr, actionid, FIXED_SHADOW(ctr) + extra, 0);
+			}
 		}
 	}
-#endif
+#endif // FIXED_COUNTER_SHADOW
 
 	for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
 		if ((1ULL << ctr) & status) {
 			extra = kpc_reload_configurable(ctr);
 
-			CONFIGURABLE_SHADOW(ctr)
-				+= kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra;
+			CONFIGURABLE_SHADOW(ctr) += kpc_configurable_max() -
+			    CONFIGURABLE_RELOAD(ctr) + extra;
 
 			/* kperf can grab the PMCs when it samples so we need to make sure the overflow
 			 * bits are in the correct state before the call to kperf_sample */
 			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);
 
-			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr));
-
-			if (CONFIGURABLE_ACTIONID(ctr))
-				kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr));
+			unsigned int actionid = CONFIGURABLE_ACTIONID(ctr);
+			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, actionid);
+
+			if (actionid != 0) {
+				uint64_t config = IA32_PERFEVTSELx(ctr);
+				kpc_sample_kperf_x86(ctr + kpc_fixed_count(), actionid,
+				    CONFIGURABLE_SHADOW(ctr) + extra, config);
+			}
 		}
 	}
 
@@ -690,4 +722,3 @@ kpc_get_pmu_version(void)
 
 	return KPC_PMU_ERROR;
 }
-
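
A note on the new sampling path in this diff: kpc_pmi_handler() no longer receives the saved interrupt state as a parameter. It recovers it from current_cpu_datap()->cpu_int_state, derives the interrupted PC and its privilege level, and hands kperf richer context through the new kpc_sample_kperf_x86(): the action ID, the counter index (configurable counters are offset by kpc_fixed_count()), the event select plus umask (config & 0xffff), the accumulated count, the PC, and flags derived from PERFEVTSEL bits 16 and 17 (IA32_PERFEVT_USER_EN / IA32_PERFEVT_OS_EN). The sketch below is a minimal user-space illustration of that flag derivation and of the shadow-counter wrap arithmetic, not kernel code: the KPC_* values are local stand-ins rather than the real kperf_kpc_flags_t constants, and the wrap formula is copied from the fixed-counter path above.

/* sketch.c -- standalone illustration; build with: cc -std=c11 sketch.c */
#include <stdint.h>
#include <stdio.h>

/* PERFEVTSEL ring-enable bits, as defined at the top of the diff */
#define IA32_PERFEVT_USER_EN 0x10000ull /* bit 16: count in ring 3 */
#define IA32_PERFEVT_OS_EN   0x20000ull /* bit 17: count in ring 0 */

/* stand-in values for the kperf_kpc_flags_t bits named in the diff */
enum {
	KPC_KERNEL_PC       = 0x1, /* interrupted PC was in the kernel */
	KPC_USER_COUNTING   = 0x2, /* counter was counting user events */
	KPC_KERNEL_COUNTING = 0x4, /* counter was counting kernel events */
};

/* mirror of the flag derivation in kpc_sample_kperf_x86() */
static unsigned
flags_from_config(uint64_t config, int interrupted_in_kernel)
{
	unsigned flags = interrupted_in_kernel ? KPC_KERNEL_PC : 0;
	if (config & IA32_PERFEVT_USER_EN) {
		flags |= KPC_USER_COUNTING;
	}
	if (config & IA32_PERFEVT_OS_EN) {
		flags |= KPC_KERNEL_COUNTING;
	}
	return flags;
}

/*
 * Shadow-counter update used on the fixed-counter PMI path: the hardware
 * counted from `reload` up to `max`, wrapped to zero (the +1), then
 * counted `extra` more events before the handler reloaded it.
 */
static uint64_t
shadow_increment(uint64_t max, uint64_t reload, uint64_t extra)
{
	return (max - reload + 1 /* wrap */) + extra;
}

int
main(void)
{
	/* event 0x3c (core cycles), umask 0, USR|OS|INT|EN = 0x53003c */
	uint64_t config = 0x53003cull;

	/* low 16 bits = event select + umask, what the diff forwards */
	printf("event+umask: %#llx\n", (unsigned long long)(config & 0xffff));
	printf("flags:       %#x\n", flags_from_config(config, 1));

	/* 48-bit counter with a period of 1000000 and 3 events of skid;
	 * the reload value follows CONFIGURABLE_RELOAD(i) = max - period */
	uint64_t max = (1ull << 48) - 1;
	uint64_t reload = max - 1000000;
	printf("increment:   %llu\n",
	    (unsigned long long)shadow_increment(max, reload, 3));
	return 0;
}

Compiled and run, this prints event+umask 0x3c, flags 0x7, and an increment of 1000004: one full period, plus one for the wrap to zero, plus the three events counted before the reload, matching the FIXED_SHADOW update in the handler.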