* Copyright (c) 2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
-#include <kern/kalloc.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mp.h>
#include <kperf/context.h>
#include <kperf/action.h>
+#include <kern/monotonic.h>
+
/* Fixed counter mask -- three counters, each with OS and USER */
#define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333)
#define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888)
+#define IA32_PERFEVT_USER_EN (0x10000)
+#define IA32_PERFEVT_OS_EN (0x20000)
+
#define IA32_PERFEVTSEL_PMI (1ull << 20)
#define IA32_PERFEVTSEL_EN (1ull << 22)
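
For reference, these masks follow the architectural PMU layout: IA32_FIXED_CTR_CTRL gives each fixed counter a four-bit field (bit 0 counts ring 0, bit 1 counts user, bit 3 raises the PMI), so 0x333 enables all rings on three counters and 0x888 arms their PMIs, while IA32_PERFEVTSELx carries the event number in bits 0-7 and the unit mask in bits 8-15 alongside the flag bits defined above. A minimal sketch of composing a selector, with an invented event/umask pair:

    /* Illustrative only -- the event number and umask below are made up for
     * the example; the flag constants are the ones defined above. */
    uint64_t config = 0x3cull              /* event select, bits 0-7 (hypothetical) */
        | (0x00ull << 8)                   /* unit mask, bits 8-15 */
        | IA32_PERFEVT_USER_EN             /* count while CPL > 0 */
        | IA32_PERFEVT_OS_EN               /* count while CPL == 0 */
        | IA32_PERFEVTSEL_PMI              /* interrupt on overflow */
        | IA32_PERFEVTSEL_EN;              /* enable the counter */
    /* (config & 0xffff) recovers just the event number and umask, which is
     * what kpc_sample_kperf_x86() passes along to kperf further down. */
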
return rdmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL );
}
-static uint64_t
-IA32_FIXED_CTRx(uint32_t ctr)
-{
-#ifdef USE_RDPMC
- return rdpmc64(RDPMC_FIXED_COUNTER_SELECTOR | ctr);
-#else /* !USE_RDPMC */
- return rdmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr);
-#endif /* !USE_RDPMC */
-}
-
#ifdef FIXED_COUNTER_RELOAD
static void
wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
uint32_t
kpc_fixed_count(void)
{
- i386_cpu_info_t *info = NULL;
+ i386_cpu_info_t *info = NULL;
info = cpuid_info();
return info->cpuid_arch_perf_leaf.fixed_number;
}
uint32_t
kpc_configurable_count(void)
{
- i386_cpu_info_t *info = NULL;
+ i386_cpu_info_t *info = NULL;
info = cpuid_info();
return info->cpuid_arch_perf_leaf.number;
}
static uint8_t
kpc_fixed_width(void)
{
- i386_cpu_info_t *info = NULL;
-
+ i386_cpu_info_t *info = NULL;
+
info = cpuid_info();
return info->cpuid_arch_perf_leaf.fixed_width;
static uint8_t
kpc_configurable_width(void)
{
- i386_cpu_info_t *info = NULL;
+ i386_cpu_info_t *info = NULL;
info = cpuid_info();
return old;
}
-void kpc_pmi_handler(x86_saved_state_t *state);
+void kpc_pmi_handler(void);
static void
set_running_fixed(boolean_t on)
int i;
boolean_t enabled;
- if( on )
+ if (on) {
/* these are per-thread in SMT */
fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI;
- else
+ } else {
/* don't allow disabling fixed counters */
return;
+ }
wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl );
/* rmw the global control */
global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
- for( i = 0; i < (int) kpc_fixed_count(); i++ )
- mask |= (1ULL<<(32+i));
+ for (i = 0; i < (int) kpc_fixed_count(); i++) {
+ mask |= (1ULL << (32 + i));
+ }
- if( on )
+ if (on) {
global |= mask;
- else
+ } else {
global &= ~mask;
+ }
wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);
}
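
In IA32_PERF_GLOBAL_CTRL the low bits enable the programmable PMCs and bits 32 and up enable the fixed counters, which is why the mask here is built from 1ULL << (32 + i). A worked example, assuming the usual three fixed counters:

    /* With kpc_fixed_count() == 3 the loop above yields
     *   mask = (1ULL << 32) | (1ULL << 33) | (1ULL << 34) = 0x0000000700000000
     * so enabling ORs in bits 32-34 and disabling clears them, leaving the
     * programmable-counter enable bits (0..n-1) untouched. */
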
/* update the global control value */
- global &= ~target_mask; /* clear the targeted PMCs bits */
- global |= state_mask; /* update the targeted PMCs bits with their new states */
+ global &= ~target_mask; /* clear the targeted PMCs bits */
+ global |= state_mask; /* update the targeted PMCs bits with their new states */
wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);
ml_set_interrupts_enabled(enabled);
struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
assert(mp_config);
- if (kpc_controls_fixed_counters())
+ if (kpc_controls_fixed_counters()) {
set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
+ }
set_running_configurable(mp_config->cfg_target_mask,
- mp_config->cfg_state_mask);
+ mp_config->cfg_state_mask);
}
int
int
kpc_get_fixed_counters(uint64_t *counterv)
{
- int i, n = kpc_fixed_count();
-
-#ifdef FIXED_COUNTER_SHADOW
- uint64_t status;
-
- /* snap the counters */
- for( i = 0; i < n; i++ ) {
- counterv[i] = FIXED_SHADOW(ctr) +
- (IA32_FIXED_CTRx(i) - FIXED_RELOAD(ctr));
- }
-
- /* Grab the overflow bits */
- status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);
-
- /* If the overflow bit is set for a counter, our previous read may or may not have been
- * before the counter overflowed. Re-read any counter with its overflow bit set so
- * we know for sure that it has overflowed. The reason this matters is that the math
- * is different for a counter that has overflowed. */
- for( i = 0; i < n; i++ ) {
- if ((1ull << (i + 32)) & status)
- counterv[i] = FIXED_SHADOW(ctr) +
- (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + IA32_FIXED_CTRx(i);
- }
-#else
- for( i = 0; i < n; i++ )
- counterv[i] = IA32_FIXED_CTRx(i);
-#endif
-
+#if MONOTONIC
+ mt_fixed_counts(counterv);
return 0;
+#else /* MONOTONIC */
+#pragma unused(counterv)
+ return ENOTSUP;
+#endif /* !MONOTONIC */
}
int
assert(configv);
- for (uint32_t i = 0; i < cfg_count; ++i)
- if ((1ULL << i) & pmc_mask)
+ for (uint32_t i = 0; i < cfg_count; ++i) {
+ if ((1ULL << i) & pmc_mask) {
*configv++ = IA32_PERFEVTSELx(i);
+ }
+ }
return 0;
}
uint32_t cfg_count = kpc_configurable_count();
uint64_t save;
- for (uint32_t i = 0; i < cfg_count; i++ ) {
- if (((1ULL << i) & pmc_mask) == 0)
+ for (uint32_t i = 0; i < cfg_count; i++) {
+ if (((1ULL << i) & pmc_mask) == 0) {
continue;
+ }
/* need to save and restore counter since it resets when reconfigured */
save = IA32_PMCx(i);
for (uint32_t i = 0; i < cfg_count; ++i) {
if ((1ULL << i) & pmc_mask) {
*it_counterv++ = CONFIGURABLE_SHADOW(i) +
- (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
+ (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
}
}
*/
for (uint32_t i = 0; i < cfg_count; ++i) {
if (((1ULL << i) & pmc_mask) &&
- ((1ULL << i) & status))
- {
+ ((1ULL << i) & status)) {
*it_counterv++ = CONFIGURABLE_SHADOW(i) +
- (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
+ (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
}
}
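
While the fixed-counter read path above now defers to the monotonic subsystem, the configurable path keeps the overflow correction: each counter is snapshotted, GLOBAL_STATUS is checked, and any counter whose overflow bit is set is re-read and credited with the wrap term, since a plain PMC - RELOAD delta is meaningless once the counter has wrapped. A worked example with invented values, assuming 48-bit configurable counters:

    /* Invented values; assumes kpc_configurable_max() == (1ULL << 48) - 1.
     * Arm counter i to interrupt after one million events: */
    uint64_t period = 1000000;
    uint64_t reload = kpc_configurable_max() - period;  /* CONFIGURABLE_RELOAD(i) */
    /* If the counter wrapped and now reads 25, the overflow branch credits
     *   (kpc_configurable_max() - reload) + 25 == period + 25
     * events on top of CONFIGURABLE_SHADOW(i); the non-overflow branch,
     * PMC - reload, would instead produce a huge bogus delta. */
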
kpc_get_curcpu_counters_mp_call(void *args)
{
struct kpc_get_counters_remote *handler = args;
- int offset=0, r=0;
+ int offset = 0, r = 0;
assert(handler);
assert(handler->buf);
r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);
/* number of counters added by this CPU, needs to be atomic */
- hw_atomic_add(&(handler->nb_counters), r);
+ os_atomic_add(&(handler->nb_counters), r, relaxed);
}
int
enabled = ml_set_interrupts_enabled(FALSE);
- if (curcpu)
- *curcpu = current_processor()->cpu_id;
+ if (curcpu) {
+ *curcpu = cpu_number();
+ }
mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl);
ml_set_interrupts_enabled(enabled);
static void
kpc_set_config_mp_call(void *vmp_config)
{
-
struct kpc_config_remote *mp_config = vmp_config;
kpc_config_t *new_config = NULL;
uint32_t classes = 0, count = 0;
new_config = mp_config->configv;
enabled = ml_set_interrupts_enabled(FALSE);
-
- if (classes & KPC_CLASS_FIXED_MASK)
- {
+
+ if (classes & KPC_CLASS_FIXED_MASK) {
kpc_set_fixed_config(&new_config[count]);
count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
}
count = kpc_configurable_count();
for (uint32_t i = 0; i < count; ++i) {
/* ignore the counter */
- if (((1ULL << i) & mp_config->pmc_mask) == 0)
+ if (((1ULL << i) & mp_config->pmc_mask) == 0) {
continue;
+ }
- if (*new_period == 0)
+ if (*new_period == 0) {
*new_period = kpc_configurable_max();
+ }
CONFIGURABLE_RELOAD(i) = max - *new_period;
return 0;
}
-/* PMI stuff */
-void kpc_pmi_handler(__unused x86_saved_state_t *state)
+static uintptr_t
+get_interrupted_pc(bool *kernel_out)
+{
+ x86_saved_state_t *state = current_cpu_datap()->cpu_int_state;
+ if (!state) {
+ return 0;
+ }
+
+ bool state_64 = is_saved_state64(state);
+ uint64_t cs;
+ if (state_64) {
+ cs = saved_state64(state)->isf.cs;
+ } else {
+ cs = saved_state32(state)->cs;
+ }
+ bool kernel = (cs & SEL_PL) != SEL_PL_U;
+ *kernel_out = kernel;
+
+ uintptr_t pc = 0;
+ if (state_64) {
+ pc = saved_state64(state)->isf.rip;
+ } else {
+ pc = saved_state32(state)->eip;
+ }
+ if (kernel) {
+ pc = VM_KERNEL_UNSLIDE(pc);
+ }
+ return pc;
+}
+
+static void
+kpc_sample_kperf_x86(uint32_t ctr, uint32_t actionid, uint64_t count,
+ uint64_t config)
+{
+ bool kernel = false;
+ uintptr_t pc = get_interrupted_pc(&kernel);
+ kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
+ if ((config) & IA32_PERFEVT_USER_EN) {
+ flags |= KPC_USER_COUNTING;
+ }
+ if ((config) & IA32_PERFEVT_OS_EN) {
+ flags |= KPC_KERNEL_COUNTING;
+ }
+ kpc_sample_kperf(actionid, ctr,
+ config & 0xffff /* just the number and umask */, count, pc, flags);
+}
+
+void
+kpc_pmi_handler(void)
{
uint64_t status, extra;
uint32_t ctr;
extra = kpc_reload_fixed(ctr);
FIXED_SHADOW(ctr)
- += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
+ += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
- BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr));
+ uint32_t actionid = FIXED_ACTIONID(ctr);
+ BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, actionid);
- if (FIXED_ACTIONID(ctr))
- kpc_sample_kperf(FIXED_ACTIONID(ctr));
+ if (actionid != 0) {
+ kpc_sample_kperf_x86(ctr, actionid, FIXED_SHADOW(ctr) + extra, 0);
+ }
}
}
-#endif
+#endif // FIXED_COUNTER_SHADOW
for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
if ((1ULL << ctr) & status) {
extra = kpc_reload_configurable(ctr);
- CONFIGURABLE_SHADOW(ctr)
- += kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra;
+ CONFIGURABLE_SHADOW(ctr) += kpc_configurable_max() -
+ CONFIGURABLE_RELOAD(ctr) + extra;
/* kperf can grab the PMCs when it samples so we need to make sure the overflow
* bits are in the correct state before the call to kperf_sample */
wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);
- BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr));
-
- if (CONFIGURABLE_ACTIONID(ctr))
- kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr));
+ unsigned int actionid = CONFIGURABLE_ACTIONID(ctr);
+ BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, actionid);
+
+ if (actionid != 0) {
+ uint64_t config = IA32_PERFEVTSELx(ctr);
+ kpc_sample_kperf_x86(ctr + kpc_fixed_count(), actionid,
+ CONFIGURABLE_SHADOW(ctr) + extra, config);
+ }
}
}
return KPC_PMU_ERROR;
}
-