#include <mach/i386/syscall_sw.h>
#include <kern/kalloc.h>
#include <sys/kdebug.h>
-
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/cpuid.h>
+#if MONOTONIC
+#include <kern/monotonic.h>
+#endif /* MONOTONIC */
+
#define PERMIT_PERMCHECK (0)
diagWork dgWork;
uint64_t lastRuptClear = 0ULL;
-
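+/* Set by dgEnaPMC once the fixed-function PMCs have been enabled on all CPUs */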
+boolean_t diag_pmc_enabled = FALSE;
void cpu_powerstats(void *);
typedef struct {
uint64_t cpu_insns;
uint64_t cpu_ucc;
uint64_t cpu_urc;
+#if DIAG_ALL_PMCS
+	uint64_t gpmcs[4];		/* general-purpose PMC values, snapshotted in cpu_powerstats() */
+#endif /* DIAG_ALL_PMCS */
} core_energy_stat_t;
typedef struct {
+	uint64_t pkes_version;		/* version of this structure's layout; dgPowerStat reports 1 */
uint64_t pkg_cres[2][7];
uint64_t pkg_power_unit;
uint64_t pkg_energy;
assert(is_saved_state64(state));
regs = saved_state64(state);
+
diagflag = ((dgWork.dgFlags & enaDiagSCs) != 0);
selector = regs->rdi;
lastRuptClear = mach_absolute_time(); /* Get the time of clear */
rval = 1; /* Normal return */
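+	/* Interrupts are re-disabled before each break; the handler asserts on
+	 * exit that they are off.
+	 */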
+ (void) ml_set_interrupts_enabled(FALSE);
break;
}
(void) copyout((char *) &real_ncpus, data, sizeof(real_ncpus)); /* Copy out number of
* processors */
-
currNap = mach_absolute_time(); /* Get the time now */
durNap = currNap - lastRuptClear; /* Get the last interval
* duration */
* slot */
}
rval = 1;
+ (void) ml_set_interrupts_enabled(FALSE);
break;
+
case dgPowerStat:
{
uint32_t c2l = 0, c2h = 0, c3l = 0, c3h = 0, c6l = 0, c6h = 0, c7l = 0, c7h = 0;
bzero(&pkes, sizeof(pkes));
bzero(&cest, sizeof(cest));
+ pkes.pkes_version = 1ULL;
rdmsr_carefully(MSR_IA32_PKG_C2_RESIDENCY, &c2l, &c2h);
rdmsr_carefully(MSR_IA32_PKG_C3_RESIDENCY, &c3l, &c3h);
rdmsr_carefully(MSR_IA32_PKG_C6_RESIDENCY, &c6l, &c6h);
pkes.pkg_cres[0][2] = ((uint64_t)c6h << 32) | c6l;
pkes.pkg_cres[0][3] = ((uint64_t)c7h << 32) | c7l;
- uint32_t cpumodel = cpuid_info()->cpuid_model;
- boolean_t c8avail;
- switch (cpumodel) {
- case CPUID_MODEL_HASWELL_ULT:
- c8avail = TRUE;
- break;
- default:
- c8avail = FALSE;
- break;
- }
uint64_t c8r = ~0ULL, c9r = ~0ULL, c10r = ~0ULL;
- if (c8avail) {
- rdmsr64_carefully(MSR_IA32_PKG_C8_RESIDENCY, &c8r);
- rdmsr64_carefully(MSR_IA32_PKG_C9_RESIDENCY, &c9r);
- rdmsr64_carefully(MSR_IA32_PKG_C10_RESIDENCY, &c10r);
- }
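+	/* Read the C8-C10 residency MSRs unconditionally; rdmsr64_carefully()
+	 * tolerates the faulting reads, presumably leaving the ~0 sentinels in
+	 * place on parts that lack these MSRs.
+	 */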
+ rdmsr64_carefully(MSR_IA32_PKG_C8_RESIDENCY, &c8r);
+ rdmsr64_carefully(MSR_IA32_PKG_C9_RESIDENCY, &c9r);
+ rdmsr64_carefully(MSR_IA32_PKG_C10_RESIDENCY, &c10r);
pkes.pkg_cres[0][4] = c8r;
pkes.pkg_cres[0][5] = c9r;
rdmsr64_carefully(MSR_IA32_RING_PERF_STATUS, &pkes.ring_ratio_instantaneous);
pkes.IA_frequency_clipping_cause = ~0ULL;
- rdmsr64_carefully(MSR_IA32_IA_PERF_LIMIT_REASONS, &pkes.IA_frequency_clipping_cause);
+
+ uint32_t ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS;
+	/* These registers have identical functionality but were renumbered
+	 * across CPU families; a generic register map module should perhaps
+	 * handle them.
+	 */
+ switch (cpuid_cpufamily()) {
+ case CPUFAMILY_INTEL_SKYLAKE:
+ case CPUFAMILY_INTEL_KABYLAKE:
+ ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS_SKL;
+ break;
+ default:
+ break;
+ }
+
+ rdmsr64_carefully(ia_perf_limits, &pkes.IA_frequency_clipping_cause);
pkes.GT_frequency_clipping_cause = ~0ULL;
rdmsr64_carefully(MSR_IA32_GT_PERF_LIMIT_REASONS, &pkes.GT_frequency_clipping_cause);
cest.citime_total = cpu_data_ptr[i]->cpu_itime_total;
cest.crtime_total = cpu_data_ptr[i]->cpu_rtime_total;
- cest.cpu_idle_exits = cpu_data_ptr[i]->cpu_idle_exits;
- cest.cpu_insns = cpu_data_ptr[i]->cpu_cur_insns;
- cest.cpu_ucc = cpu_data_ptr[i]->cpu_cur_ucc;
- cest.cpu_urc = cpu_data_ptr[i]->cpu_cur_urc;
- (void) ml_set_interrupts_enabled(TRUE);
+ cest.cpu_idle_exits = cpu_data_ptr[i]->cpu_idle_exits;
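+	/* With MONOTONIC, report the fixed-counter snapshots kept by the
+	 * monotonic subsystem (refreshed via mt_update_fixed_counts() in
+	 * cpu_powerstats() below).
+	 */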
+#if MONOTONIC
+ cest.cpu_insns = cpu_data_ptr[i]->cpu_monotonic.mtc_counts[MT_CORE_INSTRS];
+ cest.cpu_ucc = cpu_data_ptr[i]->cpu_monotonic.mtc_counts[MT_CORE_CYCLES];
+ cest.cpu_urc = cpu_data_ptr[i]->cpu_monotonic.mtc_counts[MT_CORE_REFCYCLES];
+#endif /* MONOTONIC */
+#if DIAG_ALL_PMCS
+ bcopy(&cpu_data_ptr[i]->cpu_gpmcs[0], &cest.gpmcs[0], sizeof(cest.gpmcs));
+#endif /* DIAG_ALL_PMCS */
+ (void) ml_set_interrupts_enabled(TRUE);
copyout(&cest, curpos, sizeof(cest));
curpos += sizeof(cest);
}
rval = 1;
+ (void) ml_set_interrupts_enabled(FALSE);
}
break;
case dgEnaPMC:
{
boolean_t enable = TRUE;
- mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_pmc_control, &enable);
+ uint32_t cpuinfo[4];
+ /* Require architectural PMC v2 or higher, corresponding to
+ * Merom+, or equivalent virtualised facility.
+ */
+ do_cpuid(0xA, &cpuinfo[0]);
+	if ((cpuinfo[0] & 0xFF) >= 2) {		/* CPUID.0xA EAX[7:0]: architectural PMU version ID */
+ mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_pmc_control, &enable);
+ diag_pmc_enabled = TRUE;
+ }
rval = 1;
}
break;
-
-#if DEBUG
+#if DEVELOPMENT || DEBUG
case dgGzallocTest:
{
(void) ml_set_interrupts_enabled(TRUE);
- if (diagflag == 0)
- break;
-
- unsigned *ptr = (unsigned *)kalloc(1024);
- kfree(ptr, 1024);
- *ptr = 0x42;
+ if (diagflag) {
+ unsigned *ptr = (unsigned *)kalloc(1024);
+ kfree(ptr, 1024);
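+			/* intentional write after free, to exercise gzalloc's use-after-free detection */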
+ *ptr = 0x42;
+ }
+ (void) ml_set_interrupts_enabled(FALSE);
}
break;
#endif
-#if PERMIT_PERMCHECK
+#if DEVELOPMENT || DEBUG
case dgPermCheck:
{
(void) ml_set_interrupts_enabled(TRUE);
- if (diagflag == 0)
- break;
-
- rval = pmap_permissions_verify(kernel_pmap, kernel_map, 0, ~0ULL);
+ if (diagflag)
+ rval = pmap_permissions_verify(kernel_pmap, kernel_map, 0, ~0ULL);
+ (void) ml_set_interrupts_enabled(FALSE);
}
break;
-#endif /* PERMIT_PERMCHECK */
-
+#endif /* DEVELOPMENT || DEBUG */
default: /* Handle invalid ones */
rval = 0; /* Return an exception */
}
regs->rax = rval;
- return rval; /* Normal non-ast check return */
+ assert(ml_get_interrupts_enabled() == FALSE);
+ return rval;
}
void cpu_powerstats(__unused void *arg) {
rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
cdp->cpu_c7res = ((uint64_t)ch << 32) | cl;
-
- uint64_t insns = read_pmc(FIXED_PMC0);
- uint64_t ucc = read_pmc(FIXED_PMC1);
- uint64_t urc = read_pmc(FIXED_PMC2);
- cdp->cpu_cur_insns = insns;
- cdp->cpu_cur_ucc = ucc;
- cdp->cpu_cur_urc = urc;
+
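+	/* Only sample the PMCs once dgEnaPMC has enabled them */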
+ if (diag_pmc_enabled) {
+#if MONOTONIC
+ mt_update_fixed_counts();
+#else /* MONOTONIC */
+ uint64_t insns = read_pmc(FIXED_PMC0);
+ uint64_t ucc = read_pmc(FIXED_PMC1);
+ uint64_t urc = read_pmc(FIXED_PMC2);
+#endif /* !MONOTONIC */
+#if DIAG_ALL_PMCS
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ cdp->cpu_gpmcs[i] = read_pmc(i);
+ }
+#endif /* DIAG_ALL_PMCS */
+#if !MONOTONIC
+ cdp->cpu_cur_insns = insns;
+ cdp->cpu_cur_ucc = ucc;
+ cdp->cpu_cur_urc = urc;
+#endif /* !MONOTONIC */
+ }
}
void cpu_pmc_control(void *enablep) {
+#if !MONOTONIC
boolean_t enable = *(boolean_t *)enablep;
cpu_data_t *cdp = current_cpu_datap();
set_cr4((get_cr4() & ~CR4_PCE));
}
cdp->cpu_fixed_pmcs_enabled = enable;
+#else /* !MONOTONIC */
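+/* with MONOTONIC, the fixed counters are left to the monotonic subsystem and this call is a no-op */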
+#pragma unused(enablep)
+#endif /* MONOTONIC */
}