* Copyright (c) 2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
-#include <kern/kalloc.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <arm/cpu_data_internal.h>
static int first_time = 1;
/* Private */
-
-static boolean_t
+
+static uint64_t get_counter_config(uint32_t counter);
+
+
+static boolean_t
enable_counter(uint32_t counter)
{
boolean_t enabled;
uint32_t PMCNTENSET;
/* Cycle counter is MSB; configurable counters reside in LSBs */
uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));
-
+
/* Enabled? */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));
-
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));
+
enabled = (PMCNTENSET & mask);
if (!enabled) {
/* Counter interrupt enable (PMINTENSET) */
- __asm__ volatile("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));
-
+ __asm__ volatile ("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));
+
/* Individual counter enable set (PMCNTENSET) */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));
-
- kpc_enabled_counters++;
-
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));
+
+ kpc_enabled_counters++;
+
/* 1st enabled counter? Set the master enable bit in PMCR */
if (kpc_enabled_counters == 1) {
uint32_t PMCR = 1;
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}
}
return enabled;
}
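
/*
 * Illustrative sketch (not part of this file): the counter-index-to-bit
 * mapping used by enable_counter()/disable_counter(). kpc counter 0 is the
 * ARMv7 cycle counter, controlled by bit 31 of PMCNTENSET/PMCNTENCLR and
 * PMINTENSET/PMINTENCLR; kpc counters 1..4 map to event counters 0..3 in the
 * low bits. The helper name below is hypothetical.
 */
static inline uint32_t
kpc_arm_counter_mask_sketch(uint32_t counter)
{
	/* counter 0 -> cycle counter bit (31); counter n -> event counter n-1 */
	return (counter == 0) ? (1u << 31) : (1u << (counter - 1));
}
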
-static boolean_t
+static boolean_t
disable_counter(uint32_t counter)
{
boolean_t enabled;
uint32_t PMCNTENCLR;
/* Cycle counter is MSB; configurable counters reside in LSBs */
uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));
-
+
/* Enabled? */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));
-
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));
+
enabled = (PMCNTENCLR & mask);
if (enabled) {
/* Individual counter enable clear (PMCNTENCLR) */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));
-
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));
+
/* Counter interrupt disable (PMINTENCLR) */
- __asm__ volatile("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));
-
- kpc_enabled_counters--;
-
+ __asm__ volatile ("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));
+
+ kpc_enabled_counters--;
+
/* Last enabled counter? Clear the master enable bit in PMCR */
if (kpc_enabled_counters == 0) {
uint32_t PMCR = 0;
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}
}
-
+
return enabled;
}
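
/*
 * Illustrative sketch (not part of this file): enable_counter() and
 * disable_counter() keep a running total in kpc_enabled_counters and only
 * touch the global enable bit PMCR.E (bit 0) on the first enable and the
 * last disable, so the PMU as a whole stops whenever no counter is in use.
 * A minimal model of that refcount, with hypothetical names:
 */
static uint32_t pmu_refcount_sketch;

static inline void
pmu_retain_sketch(void)
{
	if (++pmu_refcount_sketch == 1) {
		/* first user: write PMCR with E=1 to start the enabled counters */
	}
}

static inline void
pmu_release_sketch(void)
{
	if (--pmu_refcount_sketch == 0) {
		/* last user: write PMCR with E=0 to stop the PMU */
	}
}
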
-static uint64_t
+static uint64_t
read_counter(uint32_t counter)
{
uint32_t low = 0;
switch (counter) {
case 0:
/* Fixed counter */
- __asm__ volatile("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low));
break;
case 1:
case 2:
case 3:
case 4:
/* Configurable. Set PMSELR... */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
/* ...then read PMXEVCNTR */
- __asm__ volatile("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low));
break;
default:
/* ??? */
- break;
+ break;
}
-
+
return (uint64_t)low;
}
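
/*
 * Illustrative sketch (not part of this file): the configurable counters are
 * banked behind a select register. Software writes the event counter index to
 * PMSELR.SEL (c9,c12,5); the shared PMXEVCNTR (c9,c13,2) and PMXEVTYPER
 * (c9,c13,1) registers then address that counter. A hedged example of reading
 * event counter n (i.e. kpc counter n + 1):
 */
static inline uint32_t
read_event_counter_sketch(uint32_t n)
{
	uint32_t v;

	/* PMSELR.SEL = n */
	__asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (n));
	/* v = PMXEVCNTR for the selected counter */
	__asm__ volatile ("mrc p15, 0, %0, c9, c13, 2;" : "=r" (v));
	return v;
}
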
static void
-write_counter(uint32_t counter, uint64_t value)
+write_counter(uint32_t counter, uint64_t value)
{
uint32_t low = value & 0xFFFFFFFF;
switch (counter) {
case 0:
/* Fixed counter */
- __asm__ volatile("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low));
break;
case 1:
case 2:
case 3:
case 4:
/* Configurable. Set PMSELR... */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
/* ...then write PMXEVCNTR */
- __asm__ volatile("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low));
break;
default:
/* ??? */
- break;
+ break;
}
}
int n = KPC_ARM_FIXED_COUNT;
enabled = ml_set_interrupts_enabled(FALSE);
-
- for( i = 0; i < n; i++ ) {
+
+ for (i = 0; i < n; i++) {
if (on) {
enable_counter(i);
} else {
boolean_t enabled;
enabled = ml_set_interrupts_enabled(FALSE);
-
+
for (uint32_t i = 0; i < cfg_count; ++i) {
- if (((1ULL << i) & target_mask) == 0)
+ if (((1ULL << i) & target_mask) == 0) {
continue;
+ }
assert(kpc_controls_counter(offset + i));
if ((1ULL << i) & state_mask) {
ml_set_interrupts_enabled(enabled);
}
+static uintptr_t
+get_interrupted_pc(bool *kernel_out)
+{
+ struct arm_saved_state *state = getCpuDatap()->cpu_int_state;
+ if (!state) {
+ return 0;
+ }
+
+ bool kernel = !PSR_IS_USER(get_saved_state_cpsr(state));
+ *kernel_out = kernel;
+ uintptr_t pc = get_saved_state_pc(state);
+ if (kernel) {
+ pc = VM_KERNEL_UNSLIDE(pc);
+ }
+ return pc;
+}
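
/*
 * Illustrative sketch (not part of this file): how a sampler consumes
 * get_interrupted_pc(). Kernel addresses come back through
 * VM_KERNEL_UNSLIDE(), i.e. with the KASLR slide removed, so the recorded pc
 * matches the static kernel image; a zero pc means no interrupted state was
 * saved for this CPU.
 */
static void
record_sample_sketch(void)
{
	bool kernel = false;
	uintptr_t pc = get_interrupted_pc(&kernel);

	if (pc != 0) {
		/* tag the sample as kernel or user and record pc */
		(void)kernel;
	}
}
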
+
void kpc_pmi_handler(cpu_id_t source);
void
kpc_pmi_handler(cpu_id_t source)
/* The pmi must be delivered to the CPU that generated it */
if (source != getCpuDatap()->interrupt_nub) {
- panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
+ panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
}
for (ctr = 0;
- ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
- ctr++)
- {
+ ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
+ ctr++) {
uint32_t PMOVSR;
uint32_t mask;
-
- /* check the counter for overflow */
+
+ /* check the counter for overflow */
if (ctr == 0) {
mask = 1 << 31;
} else {
mask = 1 << (ctr - 1);
}
-
+
/* read PMOVSR */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));
-
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));
+
if (PMOVSR & mask) {
extra = kpc_reload_counter(ctr);
FIXED_SHADOW(ctr)
-				+= (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;
-
-			if (FIXED_ACTIONID(ctr))
-				kpc_sample_kperf(FIXED_ACTIONID(ctr));
+			        += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;
+
+			if (FIXED_ACTIONID(ctr)) {
+				bool kernel = false;
+				uintptr_t pc = get_interrupted_pc(&kernel);
+				kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, get_counter_config(ctr),
+				    FIXED_SHADOW(ctr), pc, kernel ? KPC_KERNEL_PC : 0);
+			}
+
/* clear PMOVSR bit */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
}
}
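
/*
 * Illustrative sketch (not part of this file): the arithmetic behind the
 * shadow update above. A counter is pre-loaded with (max - period) so it
 * overflows after `period` events; when the PMI is handled, the shadow total
 * advances by that full period plus whatever the counter accumulated past the
 * wrap before it was re-armed (`extra`, returned by kpc_reload_counter()).
 */
static inline uint64_t
shadow_increment_sketch(uint64_t max, uint64_t reload, uint64_t extra)
{
	/* e.g. max = 0xFFFFFFFF, reload = max - period  =>  period + extra */
	return (max - reload + 1 /* wrap */) + extra;
}
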
struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
assert(mp_config);
- if (kpc_controls_fixed_counters())
+ if (kpc_controls_fixed_counters()) {
set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
-
+ }
+
set_running_configurable(mp_config->cfg_target_mask,
- mp_config->cfg_state_mask);
+ mp_config->cfg_state_mask);
- if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) {
+ if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
thread_wakeup((event_t) &kpc_xcall_sync);
}
}
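
/*
 * Illustrative sketch (not part of this file): the cross-call rendezvous used
 * by this and the other *_xcall handlers. The caller broadcasts with a sync
 * counter primed by cpu_broadcast_xcall(); every CPU runs the handler and
 * atomically decrements the counter, and the CPU that drops it to zero wakes
 * the waiting caller. A minimal model with hypothetical names:
 */
static uint32_t example_xcall_sync_sketch;

static void
example_xcall_sketch(void *arg)
{
	(void)arg;
	/* ...per-CPU work would go here... */
	if (os_atomic_dec(&example_xcall_sync_sketch, relaxed) == 0) {
		thread_wakeup((event_t) &example_xcall_sync_sketch);
	}
}
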
/* Fixed counter accessed via top bit... */
counter = 31;
/* Write PMSELR.SEL */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
/* Read PMXEVTYPER */
-		__asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
+		__asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
break;
case 1:
case 2:
/* Offset */
counter -= 1;
/* Write PMSELR.SEL to select the configurable counter */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
/* Read PMXEVTYPER to get the config */
- __asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
break;
default:
break;
}
-
+
return config;
}
static void
set_counter_config(uint32_t counter, uint64_t config)
-{
+{
switch (counter) {
case 0:
/* Write PMSELR.SEL */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
/* Write PMXEVTYPER */
- __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
break;
case 1:
case 2:
case 3:
case 4:
/* Write PMSELR.SEL */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
/* Write PMXEVTYPER */
- __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
break;
default:
break;
{
uint32_t PMCR;
uint32_t event_counters;
-
+
	/* read PMCR and determine the number of event counters */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR));
event_counters = (PMCR >> 11) & 0x1F;
-
+
assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT);
}
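
/*
 * Illustrative sketch (not part of this file): the PMCR field decoded above.
 * PMCR bits [15:11] (the N field) report how many event counters the PMU
 * implements, in addition to the dedicated cycle counter.
 */
static inline uint32_t
pmcr_event_counter_count_sketch(uint32_t pmcr)
{
	return (pmcr >> 11) & 0x1F; /* PMCR.N */
}
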
uint32_t PMOVSR;
uint32_t mask;
uint64_t ctr;
-
- if (((1ULL << i) & pmc_mask) == 0)
+
+ if (((1ULL << i) & pmc_mask) == 0) {
continue;
+ }
ctr = read_counter(i + offset);
/* check the counter for overflow */
mask = 1 << i;
-
+
/* read PMOVSR */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));
if (PMOVSR & mask) {
- ctr = CONFIGURABLE_SHADOW(i) +
- (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
- ctr;
+ ctr = CONFIGURABLE_SHADOW(i) +
+ (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
+ ctr;
} else {
ctr = CONFIGURABLE_SHADOW(i) +
- (ctr - CONFIGURABLE_RELOAD(i));
+ (ctr - CONFIGURABLE_RELOAD(i));
}
*counterv++ = ctr;
/* check the counter for overflow */
mask = 1 << 31;
-
+
/* read PMOVSR */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));
ctr = read_counter(0);
if (PMOVSR & mask) {
ctr = FIXED_SHADOW(0) +
- (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) +
- (ctr & 0xFFFFFFFF);
+ (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) +
+ (ctr & 0xFFFFFFFF);
} else {
ctr = FIXED_SHADOW(0) +
- (ctr - FIXED_RELOAD(0));
+ (ctr - FIXED_RELOAD(0));
}
counterv[0] = ctr;
unsigned int cpu;
assert(mp_config);
-
+
if (first_time) {
kprintf( "kpc: setting PMI handler\n" );
PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler);
- for (cpu = 0; cpu < real_ncpus; cpu++)
+ for (cpu = 0; cpu < real_ncpus; cpu++) {
PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id,
- TRUE);
+ TRUE);
+ }
first_time = 0;
}
/* dispatch to all CPUs */
cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall,
- mp_config);
+ mp_config);
kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
kpc_running_classes = mp_config->classes;
kpc_configured = 1;
-
+
return 0;
}
save_regs(void)
{
int i;
- int cpuid = current_processor()->cpu_id;
+ int cpuid = cpu_number();
uint32_t PMCR = 0;
- __asm__ volatile("dmb ish");
+ __asm__ volatile ("dmb ish");
/* Clear master enable */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
/* Save individual enable state */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));
/* Save PMOVSR */
- __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));
/* Select fixed counter with PMSELR.SEL */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
/* Read PMXEVTYPER */
- __asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));
/* Save configurable event selections */
for (i = 0; i < 4; i++) {
/* Select counter with PMSELR.SEL */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
/* Read PMXEVTYPER */
- __asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
+ __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
}
/* Finally, save count for each counter */
- for (i=0; i < 5; i++) {
+ for (i = 0; i < 5; i++) {
saved_counter[cpuid][i] = read_counter(i);
}
}
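
/*
 * Illustrative sketch (not part of this file): the per-CPU state save_regs()
 * snapshots before the core can lose PMU state in idle: the PMCNTENSET enable
 * bits, the PMOVSR overflow flags, the five PMXEVTYPER selections (fixed plus
 * four configurable), and the five raw counts. A hypothetical grouping of the
 * same state:
 */
struct pmu_snapshot_sketch {
	uint32_t pmcntenset;    /* which counters were enabled */
	uint32_t pmovsr;        /* pending overflow flags */
	uint32_t pmxevtyper[5]; /* event selection per counter */
	uint64_t count[5];      /* raw counter values */
};
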
restore_regs(void)
{
int i;
- int cpuid = current_processor()->cpu_id;
+ int cpuid = cpu_number();
uint64_t extra;
- uint32_t PMCR = 1;
+ uint32_t PMCR = 1;
/* Restore counter values */
for (i = 0; i < 5; i++) {
/* did we overflow? if so handle it now since we won't get a pmi */
uint32_t mask;
- /* check the counter for overflow */
+ /* check the counter for overflow */
if (i == 0) {
mask = 1 << 31;
} else {
mask = 1 << (i - 1);
}
-
+
if (saved_PMOVSR[cpuid] & mask) {
extra = kpc_reload_counter(i);
- /*
+ /*
* CONFIGURABLE_* directly follows FIXED, so we can simply
* increment the index here. Although it's ugly.
*/
FIXED_SHADOW(i)
- += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra;
-
- if (FIXED_ACTIONID(i))
- kpc_sample_kperf(FIXED_ACTIONID(i));
+ += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra;
+
+ if (FIXED_ACTIONID(i)) {
+ bool kernel = false;
+ uintptr_t pc = get_interrupted_pc(&kernel);
+ kpc_sample_kperf(FIXED_ACTIONID(i), i, get_counter_config(i),
+ FIXED_SHADOW(i), pc, kernel ? KPC_KERNEL_PC : 0);
+ }
} else {
write_counter(i, saved_counter[cpuid][i]);
}
}
/* Restore configuration - first, the fixed... */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
/* Write PMXEVTYPER */
- __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0]));
-
+ __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0]));
+
/* ...then the configurable */
for (i = 0; i < 4; i++) {
/* Select counter with PMSELR.SEL */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
/* Write PMXEVTYPER */
- __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1]));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1]));
}
/* Restore enable state */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid]));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid]));
/* Counter master re-enable */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}
static void
/* set the new period */
count = kpc_fixed_count();
for (uint32_t i = 0; i < count; ++i) {
- if (*new_period == 0)
+ if (*new_period == 0) {
*new_period = kpc_fixed_max();
+ }
FIXED_RELOAD(i) = max - *new_period;
/* reload the counter if possible */
kpc_reload_counter(i);
count = kpc_configurable_count();
for (uint32_t i = 0; i < count; ++i) {
/* ignore the counter */
- if (((1ULL << i) & mp_config->pmc_mask) == 0)
+ if (((1ULL << i) & mp_config->pmc_mask) == 0) {
continue;
- if (*new_period == 0)
+ }
+ if (*new_period == 0) {
*new_period = kpc_configurable_max();
+ }
CONFIGURABLE_RELOAD(i) = max - *new_period;
/* reload the counter */
kpc_reload_counter(offset + i);
ml_set_interrupts_enabled(enabled);
- if (hw_atomic_sub(&kpc_reload_sync, 1) == 0)
+ if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
thread_wakeup((event_t) &kpc_reload_sync);
+ }
}
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
-
+
assert(configv);
- for (uint32_t i = 0; i < cfg_count; ++i)
- if ((1ULL << i) & pmc_mask)
+ for (uint32_t i = 0; i < cfg_count; ++i) {
+ if ((1ULL << i) & pmc_mask) {
*configv++ = get_counter_config(i + offset);
+ }
+ }
return 0;
}
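
/*
 * Illustrative sketch (not part of this file): pmc_mask selects which
 * configurable counters participate, one bit per counter starting at bit 0.
 * A mask of 0x5 (binary 101), for example, returns the configs of
 * configurable counters 0 and 2 only, in that order. Hedged caller sketch:
 */
static void
read_two_configs_sketch(void)
{
	kpc_config_t configs[2];

	/* counters 0 and 2 -> bits 0 and 2 of the mask */
	(void)kpc_get_configurable_config(configs, 0x5ULL);
}
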
enabled = ml_set_interrupts_enabled(FALSE);
for (uint32_t i = 0; i < cfg_count; ++i) {
- if (((1ULL << i) & pmc_mask) == 0)
+ if (((1ULL << i) & pmc_mask) == 0) {
continue;
+ }
assert(kpc_controls_counter(i + offset));
set_counter_config(i + offset, *configv++);
new_config += kpc_popcount(mp_config->pmc_mask);
}
- if (hw_atomic_sub(&kpc_config_sync, 1) == 0)
+ if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
thread_wakeup((event_t) &kpc_config_sync);
+ }
}
int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
-{
+{
/* dispatch to all CPUs */
cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);
return 0;
}
-void
+void
kpc_idle(void)
{
- if (kpc_configured)
+ if (kpc_configured) {
save_regs();
+ }
}
-void
-kpc_idle_exit(void)
+void
+kpc_idle_exit(void)
{
- if (kpc_configured)
+ if (kpc_configured) {
restore_regs();
+ }
}
static uint32_t kpc_xread_sync;
kpc_get_curcpu_counters_xcall(void *args)
{
struct kpc_get_counters_remote *handler = args;
- int offset=0, r=0;
+ int offset = 0, r = 0;
assert(handler);
assert(handler->buf);
r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);
/* number of counters added by this CPU, needs to be atomic */
- hw_atomic_add(&(handler->nb_counters), r);
+ os_atomic_add(&(handler->nb_counters), r, relaxed);
- if (hw_atomic_sub(&kpc_xread_sync, 1) == 0)
+ if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
thread_wakeup((event_t) &kpc_xread_sync);
+ }
}
int
enabled = ml_set_interrupts_enabled(FALSE);
- if (curcpu)
- *curcpu = current_processor()->cpu_id;
+ if (curcpu) {
+ *curcpu = cpu_number();
+ }
cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
ml_set_interrupts_enabled(enabled);
int
kpc_set_sw_inc( uint32_t mask )
-{
+{
/* Only works with the configurable counters set to count the increment event (0x0) */
/* Write to PMSWINC */
- __asm__ volatile("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));
-
+ __asm__ volatile ("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));
+
return 0;
}
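
/*
 * Illustrative sketch (not part of this file): PMSWINC only advances event
 * counters whose PMXEVTYPER is programmed with the software-increment event
 * (0x0), as the comment above notes. A hedged usage sketch that pulses
 * configurable counter 0:
 */
static void
sw_increment_counter0_sketch(void)
{
	/* assumes configurable counter 0 is already configured for event 0x0 */
	(void)kpc_set_sw_inc(1u << 0);
}
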
return ENOTSUP;
}
-void
+void
kpc_idle(void)
{
// do nothing
}
-void
-kpc_idle_exit(void)
+void
+kpc_idle_exit(void)
{
// do nothing
}