*
* Implements the "wrappers" to the KEXT.
*/
-#include <kern/machine.h>
-#include <i386/machine_routines.h>
-#include <i386/machine_cpu.h>
-#include <i386/misc_protos.h>
-#include <i386/pmap.h>
#include <i386/asm.h>
+#include <i386/machine_cpu.h>
#include <i386/mp.h>
+#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
+#include <i386/pmap.h>
+#include <i386/misc_protos.h>
+#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/processor.h>
+#include <kern/etimer.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock.h>
#include <kern/sched_prim.h>
+#include <i386/lapic.h>
/*
* Kernel parameter determining whether threads are halted unconditionally
goto out;
my_cpu->lcpu.state = LCPU_IDLE;
- my_cpu->lcpu.flags |= X86CORE_FL_IDLE;
DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
MARK_CPU_IDLE(cpu_number());
if (pmInitDone
&& pmDispatch != NULL
- && pmDispatch->cstateMachineIdle != NULL)
- (*pmDispatch->cstateMachineIdle)(0x7FFFFFFFFFFFFFFFULL);
+ && pmDispatch->MachineIdle != NULL)
+ (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
else {
/*
* If no power management, re-enable interrupts and halt.
*/
MARK_CPU_ACTIVE(cpu_number());
DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
- my_cpu->lcpu.flags &= ~(X86CORE_FL_IDLE | X86CORE_FL_WAKEUP);
my_cpu->lcpu.state = LCPU_RUN;
/*
default:
__asm__ volatile ("cli");
- if (pmInitDone
+ if (pmInitDone
&& pmDispatch != NULL
&& pmDispatch->pmCPUHalt != NULL) {
/*
{
uint64_t deadline = EndOfAllTime;
- if (pmInitDone
+ if (pmInitDone
&& pmDispatch != NULL
&& pmDispatch->GetDeadline != NULL)
deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
- if (pmInitDone
+ if (pmInitDone
&& pmDispatch != NULL
&& pmDispatch->SetDeadline != NULL)
deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);
{
boolean_t do_ipi;
- cpu->lcpu.flags |= X86CORE_FL_WAKEUP;
if (pmInitDone
&& pmDispatch != NULL
&& pmDispatch->exitIdle != NULL)
else
do_ipi = TRUE;
- if (do_ipi)
- cpu->lcpu.flags &= ~X86CORE_FL_WAKEUP;
-
return(do_ipi);
}
}
boolean_t
-machine_cpu_is_inactive(int cpu)
+machine_processor_is_inactive(processor_t processor)
{
+	/*
+	 * Interface change: callers now pass a processor_t instead of a raw
+	 * cpu number; the legacy cpu id is derived from processor->cpu_id
+	 * for the cpu_to_lcpu() lookup below.
+	 */
+	int cpu = processor->cpu_id;
+
	if (pmDispatch != NULL
	    && pmDispatch->pmIsCPUUnAvailable != NULL)
	return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
+	/* No PM kext hook registered: the cpu is considered available. */
	return(FALSE);
}
+/*
+ * Ask the power-management kext to pick a processor for the scheduler.
+ *
+ * pset      - processor set to choose from; NULL means "no restriction"
+ *             (encoded as a -1..-1 cpu range for the kext callback).
+ * preferred - the scheduler's preferred processor, or NULL.
+ *
+ * Returns the kext's choice, NULL if the kext declines every cpu
+ * (pmChooseCPU returned -1), or the caller's preference unchanged when
+ * PM is not initialized or no pmChooseCPU callback is registered.
+ */
+processor_t
+machine_choose_processor(processor_set_t pset,
+			 processor_t preferred)
+{
+    int		startCPU;
+    int		endCPU;
+    int		preferredCPU;
+    int		chosenCPU;
+
+    if (!pmInitDone)
+	return(preferred);
+
+    /* -1 sentinels tell the kext the whole cpu range is eligible. */
+    if (pset == NULL) {
+	startCPU = -1;
+	endCPU = -1;
+    } else {
+	startCPU = pset->cpu_set_low;
+	endCPU = pset->cpu_set_hi;
+    }
+
+    if (preferred == NULL)
+	preferredCPU = -1;
+    else
+	preferredCPU = preferred->cpu_id;
+
+    if (pmDispatch != NULL
+	&& pmDispatch->pmChooseCPU != NULL) {
+	chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);
+
+	/* -1 from the kext means no cpu should run the thread now. */
+	if (chosenCPU == -1)
+	    return(NULL);
+	return(cpu_datap(chosenCPU)->cpu_processor);
+    }
+
+    return(preferred);
+}
+
static uint32_t
pmGetSavedRunCount(void)
{
cpu_PM_interrupt(cpu);
}
+/*
+ * Kernel callback handed to the PM kext (see callbacks->pmSendIPI):
+ * delivers a power-management interrupt to the target cpu through the
+ * local APIC.
+ */
+static void
+pmSendIPI(int cpu)
+{
+    lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
+}
+
+/*
+ * Kernel callback handed to the PM kext (see callbacks->GetNanotimeInfo):
+ * returns a pointer to the kernel's rtc_nanotime_info structure.
+ * NOTE(review): presumably used by the kext for TSC<->nanosecond
+ * conversion — confirm against the kext side of this interface.
+ */
+static rtc_nanotime_t *
+pmGetNanotimeInfo(void)
+{
+    return(&rtc_nanotime_info);
+}
+
/*
* Called by the power management kext to register itself and to get the
* callbacks it might need into other kernel functions. This interface
callbacks->LCPUtoProcessor = pmLCPUtoProcessor;
callbacks->ThreadBind = thread_bind;
callbacks->GetSavedRunCount = pmGetSavedRunCount;
+ callbacks->pmSendIPI = pmSendIPI;
+ callbacks->GetNanotimeInfo = pmGetNanotimeInfo;
callbacks->topoParms = &topoParms;
} else {
panic("Version mis-match between Kernel and CPU PM");
if (cpuFuncs != NULL) {
pmDispatch = cpuFuncs;
+
+ if (pmDispatch->pmIPIHandler != NULL) {
+ lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
+ }
}
}