#include <kern/sched_prim.h>
#include <i386/lapic.h>
#include <i386/pal_routines.h>
-
#include <sys/kdebug.h>
extern int disableConsoleOutput;
#define DELAY_UNSET 0xFFFFFFFFFFFFFFFFULL
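+/*
+ * Histogram bucket boundaries, in nanoseconds, for idle-interval
+ * statistics: 16us up to ~32.8ms, doubling per bucket. The
+ * run-interval histogram reuses the same boundaries.
+ */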
+uint64_t cpu_itime_bins[CPU_ITIME_BINS] = {
+	16 * NSEC_PER_USEC,   32 * NSEC_PER_USEC,    64 * NSEC_PER_USEC,
+	128 * NSEC_PER_USEC,  256 * NSEC_PER_USEC,   512 * NSEC_PER_USEC,
+	1024 * NSEC_PER_USEC, 2048 * NSEC_PER_USEC,  4096 * NSEC_PER_USEC,
+	8192 * NSEC_PER_USEC, 16384 * NSEC_PER_USEC, 32768 * NSEC_PER_USEC
+};
+uint64_t *cpu_rtime_bins = &cpu_itime_bins[0];
+
/*
* The following is set when the KEXT loads and initializes.
*/
pmDispatch_t *pmDispatch = NULL;
-static uint32_t pmInitDone = 0;
+uint32_t pmInitDone = 0;
static boolean_t earlyTopology = FALSE;
static uint64_t earlyMaxBusDelay = DELAY_UNSET;
static uint64_t earlyMaxIntDelay = DELAY_UNSET;
(*pmDispatch->cstateInit)();
}
-#define CPU_ACTIVE_STAT_BIN_1 (500000)
-#define CPU_ACTIVE_STAT_BIN_2 (2000000)
-#define CPU_ACTIVE_STAT_BIN_3 (5000000)
-
-#define CPU_IDLE_STAT_BIN_1 (500000)
-#define CPU_IDLE_STAT_BIN_2 (2000000)
-#define CPU_IDLE_STAT_BIN_3 (5000000)
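+/*
+ * Tally an elapsed interval into the first histogram bucket whose
+ * boundary exceeds it; used below for both run (rtime) and idle
+ * (itime) intervals. Intervals at or beyond the largest boundary
+ * fall through the loop and are not counted.
+ */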
+static inline void machine_classify_interval(uint64_t interval, uint64_t *bins, uint64_t *binvals, uint32_t nbins) {
+ uint32_t i;
+ for (i = 0; i < nbins; i++) {
+ if (interval < binvals[i]) {
+ bins[i]++;
+ break;
+ }
+ }
+}
/*
 * Called when the CPU is idle. It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
- cpu_data_t *my_cpu = current_cpu_datap();
- uint64_t ctime, rtime, itime;
+ cpu_data_t *my_cpu = current_cpu_datap();
+ uint64_t ctime, rtime, itime;
- if (my_cpu == NULL)
- goto out;
+ if (my_cpu == NULL)
+ goto out;
ctime = mach_absolute_time();
- my_cpu->lcpu.state = LCPU_IDLE;
- DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
- MARK_CPU_IDLE(cpu_number());
+ my_cpu->lcpu.state = LCPU_IDLE;
+ DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
+ MARK_CPU_IDLE(cpu_number());
rtime = ctime - my_cpu->cpu_ixtime;
my_cpu->cpu_rtime_total += rtime;
+ machine_classify_interval(rtime, &my_cpu->cpu_rtimes[0], &cpu_rtime_bins[0], CPU_RTIME_BINS);
+
+ if (pmInitDone) {
+ /*
+ * Handle the case where ml_set_maxbusdelay() or ml_set_maxintdelay()
+ * was called before the CPU PM kext registered. We apply those
+ * values here because idle is where they are first used and where
+ * the decisions based on them are made.
+ */
+ if (earlyMaxBusDelay != DELAY_UNSET)
+ ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));
+
+ if (earlyMaxIntDelay != DELAY_UNSET)
+ ml_set_maxintdelay(earlyMaxIntDelay);
+ }
- if (rtime < CPU_ACTIVE_STAT_BIN_1)
- my_cpu->cpu_rtimes[0]++;
- else if (rtime < CPU_ACTIVE_STAT_BIN_2)
- my_cpu->cpu_rtimes[1]++;
- else if (rtime < CPU_ACTIVE_STAT_BIN_3)
- my_cpu->cpu_rtimes[2]++;
- else
- my_cpu->cpu_rtimes[3]++;
-
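+ /*
+ * If the PM kext is ready, let it idle the CPU. The deadline passed
+ * is INT64_MAX, i.e. effectively "no deadline" from this caller.
+ */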
+ if (pmInitDone
+ && pmDispatch != NULL
+ && pmDispatch->MachineIdle != NULL)
+ (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
+ else {
+ /*
+ * If no power management, re-enable interrupts and halt.
+ * This will keep the CPU from spinning through the scheduler
+ * and will allow at least some minimal power savings (but it
+ * may cause problems in some MP configurations w.r.t. the APIC
+ * stopping during a GV3 transition).
+ */
+ pal_hlt();
+
+ /* Once woken, re-disable interrupts. */
+ pal_cli();
+ }
- if (pmInitDone) {
/*
- * Handle case where ml_set_maxbusdelay() or ml_set_maxintdelay()
- * were called prior to the CPU PM kext being registered. We do
- * this here since we know at this point the values will be first
- * used since idle is where the decisions using these values is made.
+ * Mark the CPU as running again.
*/
- if (earlyMaxBusDelay != DELAY_UNSET)
- ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));
-
- if (earlyMaxIntDelay != DELAY_UNSET)
- ml_set_maxintdelay(earlyMaxIntDelay);
- }
-
- if (pmInitDone
- && pmDispatch != NULL
- && pmDispatch->MachineIdle != NULL)
- (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
- else {
- /*
- * If no power management, re-enable interrupts and halt.
- * This will keep the CPU from spinning through the scheduler
- * and will allow at least some minimal power savings (but it
- * may cause problems in some MP configurations w.r.t. the APIC
- * stopping during a GV3 transition).
- */
- pal_hlt();
-
- /* Once woken, re-disable interrupts. */
- pal_cli();
- }
-
- /*
- * Mark the CPU as running again.
- */
- MARK_CPU_ACTIVE(cpu_number());
- DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
+ MARK_CPU_ACTIVE(cpu_number());
+ DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
uint64_t ixtime = my_cpu->cpu_ixtime = mach_absolute_time();
- itime = ixtime - ctime;
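+ /* Count this idle exit in the per-CPU statistics. */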
+ my_cpu->cpu_idle_exits++;
- my_cpu->lcpu.state = LCPU_RUN;
+ itime = ixtime - ctime;
- if (itime < CPU_IDLE_STAT_BIN_1)
- my_cpu->cpu_itimes[0]++;
- else if (itime < CPU_IDLE_STAT_BIN_2)
- my_cpu->cpu_itimes[1]++;
- else if (itime < CPU_IDLE_STAT_BIN_3)
- my_cpu->cpu_itimes[2]++;
- else
- my_cpu->cpu_itimes[3]++;
+ my_cpu->lcpu.state = LCPU_RUN;
+ machine_classify_interval(itime, &my_cpu->cpu_itimes[0], &cpu_itime_bins[0], CPU_ITIME_BINS);
my_cpu->cpu_itime_total += itime;
- /*
- * Re-enable interrupts.
- */
- out:
- pal_sti();
+ /*
+ * Re-enable interrupts.
+ */
+out:
+ pal_sti();
}
/*
break;
case PM_HALT_NORMAL:
+ case PM_HALT_SLEEP:	/* sleep now takes the same path as a normal halt */
default:
pal_cli();
(*pmDispatch->pmCPUHalt)();
/*
- * We've exited halt, so get the the CPU schedulable again.
+ * We've exited halt, so get the CPU schedulable again.
+ * - by calling the fast init routine for a slave, or
+ * - by returning if we're the master processor.
*/
- i386_init_slave_fast();
-
- panic("init_slave_fast returned");
+ if (cpup->cpu_number != master_cpu) {
+ i386_init_slave_fast();
+ panic("init_slave_fast returned");
+ }
} else
{
/*
pmInitDone = 1;
}
-static x86_lcpu_t *
+x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
return(cpu_to_lcpu(cpu));
}
-static x86_lcpu_t *
+x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
cpu_data_t *cpup = current_cpu_datap();
/*
* Returns the root of the package tree.
*/
-static x86_pkg_t *
+x86_pkg_t *
pmGetPkgRoot(void)
{
return(x86_pkgs);
return(cpu_datap(cpu)->cpu_hibernate);
}
-static processor_t
+processor_t
pmLCPUtoProcessor(int lcpu)
{
return(cpu_datap(lcpu)->cpu_processor);
&& rtc_nanotime->generation != pal_rtc_nanotime_info.generation);
}
-static uint32_t
+uint32_t
pmTimerQueueMigrate(int target_cpu)
{
/* Call the etimer code to do this. */
}
if (cpuFuncs != NULL) {
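+ /*
+ * Only one dispatch table may ever be registered; a second
+ * registration most likely means AICPM loaded while xcpm
+ * (in-kernel power management) is active.
+ */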
+ if (pmDispatch) {
+ panic("Attempt to re-register power management interface--AICPM present in xcpm mode? %p->%p", pmDispatch, cpuFuncs);
+ }
+
pmDispatch = cpuFuncs;
if (earlyTopology
if (entry) {
(void)__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);
}
- else {
- (void)__sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
- }
+ else {
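+ /*
+ * __sync_fetch_and_sub() returns the pre-decrement count; if it
+ * equals nLThreadsPerPackage, every logical CPU in the package was
+ * idle and this one is the first to exit idle.
+ */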
+ uint32_t nidle = __sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
+ if (nidle == topoParms.nLThreadsPerPackage) {
+ my_cpu->lcpu.package->package_idle_exits++;
+ }
+ }
}