X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4452a7af2eac33dbad800bcc91f2399d62c18f53..0b4c1975fb5e4eccf1012a35081f7e7799b81046:/osfmk/i386/pmCPU.c

diff --git a/osfmk/i386/pmCPU.c b/osfmk/i386/pmCPU.c
index 63108e188..91e3799bb 100644
--- a/osfmk/i386/pmCPU.c
+++ b/osfmk/i386/pmCPU.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -31,28 +31,23 @@
  *
  * Implements the "wrappers" to the KEXT.
  */
-#include
-#include
-#include
-#include
 #include
+#include
 #include
+#include
 #include
+#include
+#include
+#include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
-#if MACH_KDB
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#endif
+#include
+#include

 extern int	disableConsoleOutput;

@@ -63,332 +58,642 @@ decl_simple_lock_data(,pm_init_lock);
  */
 pmDispatch_t	*pmDispatch	= NULL;

+static uint32_t		pmInitDone	= 0;
+
+
 /*
- * Current power management states (for use until KEXT is loaded).
+ * Initialize the Cstate change code.
  */
-static pmInitState_t	pmInitState;
+void
+power_management_init(void)
+{
+	static boolean_t	initialized = FALSE;
+
+	/*
+	 * Initialize the lock for the KEXT initialization.
+	 */
+	if (!initialized) {
+		simple_lock_init(&pm_init_lock, 0);
+		initialized = TRUE;
+	}
+
+	if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
+		(*pmDispatch->cstateInit)();
+}

 /*
- * Nap control variables:
+ * Called when the CPU is idle.  It calls into the power management kext
+ * to determine the best way to idle the CPU.
  */
-uint32_t	napCtl = 0;		/* Defaults to neither napping
-					   nor halting */
-uint32_t	forcenap = 0;		/* Force nap (fn) boot-arg controls */
-uint32_t	maxBusDelay = 0xFFFFFFFF; /* Maximum memory bus delay that
-					     I/O devices can tolerate
-					     before errors (nanoseconds) */
-uint32_t	C4C2SnoopDelay = 0;	/* C4 to C2 transition time -
-					   time before a C4 system
-					   can snoop (nanoseconds) */
+void
+machine_idle(void)
+{
+	cpu_data_t	*my_cpu = current_cpu_datap();
+
+	if (my_cpu == NULL)
+		goto out;
+
+	my_cpu->lcpu.state = LCPU_IDLE;
+	DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
+	MARK_CPU_IDLE(cpu_number());
+
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->MachineIdle != NULL)
+		(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
+	else {
+		/*
+		 * If no power management, re-enable interrupts and halt.
+		 * This will keep the CPU from spinning through the scheduler
+		 * and will allow at least some minimal power savings (but it
+		 * can cause problems in some MP configurations w.r.t. the APIC
+		 * stopping during a GV3 transition).
+		 */
+		__asm__ volatile ("sti; hlt");
+	}
+
+	/*
+	 * Mark the CPU as running again.
+	 */
+	MARK_CPU_ACTIVE(cpu_number());
+	DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
+	my_cpu->lcpu.state = LCPU_RUN;
+
+	/*
+	 * Re-enable interrupts.
+	 */
+ out:
+	__asm__ volatile("sti");
+}
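
The new machine_idle() shows the guard pattern used by every wrapper in this file: check pmInitDone, then the pmDispatch table, then the individual function pointer, and fall back to a safe inline behavior when the kext has not registered. A minimal sketch of that pattern (illustrative only, not part of the diff; pm_idle_fallback is a hypothetical name for the inline "sti; hlt" path):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct {
        void (*MachineIdle)(uint64_t max_idle_ns);
    } dispatch_sketch_t;

    static dispatch_sketch_t *dispatch_sketch  = NULL;  /* filled in by the kext */
    static uint32_t           init_done_sketch = 0;     /* set once init completes */

    /* Fallback: enable interrupts and halt until the next one arrives. */
    static void
    pm_idle_fallback(void)
    {
        __asm__ volatile ("sti; hlt");
    }

    static void
    idle_sketch(void)
    {
        if (init_done_sketch
            && dispatch_sketch != NULL
            && dispatch_sketch->MachineIdle != NULL)
            (*dispatch_sketch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
        else
            pm_idle_fallback();
    }
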
 /*
- * We are being asked to set PState (sel).
+ * Called when the CPU is to be halted.  It will choose the best C-State
+ * to be in.
  */
 void
-pmsCPUSet(uint32_t sel)
+pmCPUHalt(uint32_t reason)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPUSet != NULL)
-		(*pmDispatch->pmsCPUSet)(sel);
-	else
-		pmInitState.PState = sel;
+	cpu_data_t	*cpup = current_cpu_datap();
+
+	switch (reason) {
+	case PM_HALT_DEBUG:
+		cpup->lcpu.state = LCPU_PAUSE;
+		__asm__ volatile ("wbinvd; hlt");
+		break;
+
+	case PM_HALT_PANIC:
+		cpup->lcpu.state = LCPU_PAUSE;
+		__asm__ volatile ("cli; wbinvd; hlt");
+		break;
+
+	case PM_HALT_NORMAL:
+	default:
+		__asm__ volatile ("cli");
+
+		if (pmInitDone
+		    && pmDispatch != NULL
+		    && pmDispatch->pmCPUHalt != NULL) {
+			/*
+			 * Halt the CPU (and put it in a low power state).
+			 */
+			(*pmDispatch->pmCPUHalt)();
+
+			/*
+			 * We've exited halt, so get the CPU schedulable again.
+			 */
+			i386_init_slave_fast();
+
+			panic("init_slave_fast returned");
+		} else {
+			/*
+			 * If no power management and a processor is taken off-line,
+			 * then invalidate the cache and halt it (it will not be able
+			 * to be brought back on-line without resetting the CPU).
+			 */
+			__asm__ volatile ("wbinvd");
+			cpup->lcpu.state = LCPU_HALT;
+			__asm__ volatile ("wbinvd; hlt");
+
+			panic("back from Halt");
+		}
+		break;
+	}
+}
+
+void
+pmMarkAllCPUsOff(void)
+{
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->markAllCPUsOff != NULL)
+		(*pmDispatch->markAllCPUsOff)();
+}
+
+static void
+pmInitComplete(void)
+{
+	pmInitDone = 1;
+}
+
+static x86_lcpu_t *
+pmGetLogicalCPU(int cpu)
+{
+	return(cpu_to_lcpu(cpu));
+}
+
+static x86_lcpu_t *
+pmGetMyLogicalCPU(void)
+{
+	cpu_data_t	*cpup = current_cpu_datap();
+
+	return(&cpup->lcpu);
+}
+
+static x86_core_t *
+pmGetCore(int cpu)
+{
+	return(cpu_to_core(cpu));
+}
+
+static x86_core_t *
+pmGetMyCore(void)
+{
+	cpu_data_t	*cpup = current_cpu_datap();
+
+	return(cpup->lcpu.core);
+}
+
+static x86_die_t *
+pmGetDie(int cpu)
+{
+	return(cpu_to_die(cpu));
+}
+
+static x86_die_t *
+pmGetMyDie(void)
+{
+	cpu_data_t	*cpup = current_cpu_datap();
+
+	return(cpup->lcpu.die);
+}
+
+static x86_pkg_t *
+pmGetPackage(int cpu)
+{
+	return(cpu_to_package(cpu));
+}
+
+static x86_pkg_t *
+pmGetMyPackage(void)
+{
+	cpu_data_t	*cpup = current_cpu_datap();
+
+	return(cpup->lcpu.package);
+}
+
+static void
+pmLockCPUTopology(int lock)
+{
+	if (lock) {
+		simple_lock(&x86_topo_lock);
+	} else {
+		simple_unlock(&x86_topo_lock);
+	}
 }

 /*
- * This code configures the initial step tables.  It should be called after
- * the timebase frequency is initialized.
- *
- * Note that this is not used in normal operation.  It is strictly for
- * debugging/testing purposes.
+ * Called to get the next deadline that has been set by the
+ * power management code.
  */
-void
-pmsCPUConf(void)
+uint64_t
+pmCPUGetDeadline(cpu_data_t *cpu)
 {
+	uint64_t	deadline = EndOfAllTime;

-	if (pmDispatch != NULL && pmDispatch->pmsCPUConf != NULL)
-		(*pmDispatch->pmsCPUConf)();
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->GetDeadline != NULL)
+		deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);
+
+	return(deadline);
 }
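
pmCPUGetDeadline() above and pmCPUSetDeadline() below let the kext arbitrate timer deadlines against its own power-management events. On the kext side, a SetDeadline handler would plausibly clamp the kernel's request to its own pending deadline (illustrative sketch; kext_pm_deadline is a hypothetical variable, and 0 is assumed to mean "none pending"):

    #include <stdint.h>

    static uint64_t kext_pm_deadline = 0;   /* hypothetical: kext's pending deadline */

    /* Return whichever deadline is sooner: the kernel's or the kext's. */
    static uint64_t
    SetDeadline_sketch(uint64_t requested)
    {
        if (kext_pm_deadline != 0 && kext_pm_deadline < requested)
            return kext_pm_deadline;
        return requested;
    }
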
 /*
- * Machine-dependent initialization.
+ * Called to determine if the supplied deadline or the power management
+ * deadline is sooner.  Returns whichever one is first.
  */
-void
-pmsCPUMachineInit(void)
+uint64_t
+pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
 {
-	/*
-	 * Initialize some of the initial state to "uninitialized" until
-	 * it gets set with something more useful.  This allows the KEXT
-	 * to determine if the initial value was actually set to something.
-	 */
-	pmInitState.PState = -1;
-	pmInitState.PLimit = -1;
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->SetDeadline != NULL)
+		deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

-	if (pmDispatch != NULL && pmDispatch->pmsCPUMachineInit != NULL)
-		(*pmDispatch->pmsCPUMachineInit)();
+	return(deadline);
 }

 /*
- * This function should be called once for each processor to force the
- * processor to the correct initial voltage and frequency.
+ * Called when a power management deadline expires.
  */
 void
-pmsCPUInit(void)
+pmCPUDeadline(cpu_data_t *cpu)
+{
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->Deadline != NULL)
+		(*pmDispatch->Deadline)(&cpu->lcpu);
+}
+
+/*
+ * Called to get a CPU out of idle.
+ */
+boolean_t
+pmCPUExitIdle(cpu_data_t *cpu)
+{
+	boolean_t	do_ipi;
+
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->exitIdle != NULL)
+		do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
+	else
+		do_ipi = TRUE;
+
+	return(do_ipi);
+}
+
+kern_return_t
+pmCPUExitHalt(int cpu)
 {
-	pmsCPUMachineInit();
-	if (pmDispatch != NULL && pmDispatch->pmsCPUInit != NULL)
-		(*pmDispatch->pmsCPUInit)();
+	kern_return_t	rc = KERN_INVALID_ARGUMENT;
+
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->exitHalt != NULL)
+		rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));
+
+	return(rc);
+}
+
+kern_return_t
+pmCPUExitHaltToOff(int cpu)
+{
+	kern_return_t	rc = KERN_INVALID_ARGUMENT;
+
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->exitHaltToOff != NULL)
+		rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu));
+
+	return(rc);
 }

 /*
- * Broadcast a change to all processing including ourselves.
+ * Called to initialize the power management structures for the CPUs.
  */
 void
-pmsCPURun(uint32_t nstep)
+pmCPUStateInit(void)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPURun != NULL)
-		(*pmDispatch->pmsCPURun)(nstep);
+	if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
+		(*pmDispatch->pmCPUStateInit)();
 }

 /*
- * Return the current state of a core.
+ * Called when a CPU is being restarted after being powered off (as in S3).
  */
-uint32_t
-pmsCPUQuery(void)
+void
+pmCPUMarkRunning(cpu_data_t *cpu)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPUQuery != NULL)
-		return((*pmDispatch->pmsCPUQuery)());
+	cpu_data_t	*cpup = current_cpu_datap();

-	/*
-	 * Return a non-sense value.
-	 */
-	return((~0) << 16);
+	if (pmInitDone
+	    && pmDispatch != NULL
+	    && pmDispatch->markCPURunning != NULL)
+		(*pmDispatch->markCPURunning)(&cpu->lcpu);
+	else
+		cpup->lcpu.state = LCPU_RUN;
 }

 /*
- * Return the current state of the package.
+ * Called to get/set CPU power management state.
  */
-uint32_t
-pmsCPUPackageQuery(void)
+int
+pmCPUControl(uint32_t cmd, void *datap)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPUPackageQuery != NULL)
-		return((*pmDispatch->pmsCPUPackageQuery)());
+	int	rc = -1;

-	/*
-	 * Return a non-sense value.
-	 */
-	return((~0) << 16);
+	if (pmDispatch != NULL
+	    && pmDispatch->pmCPUControl != NULL)
+		rc = (*pmDispatch->pmCPUControl)(cmd, datap);
+
+	return(rc);
 }
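
Of the wrappers above, pmCPUExitIdle() is the one whose return value changes caller behavior: it reports whether an inter-processor interrupt is still required to bring the target CPU out of idle, since a registered kext may have armed a wake mechanism of its own. A caller on a signalling path would use it roughly as follows (illustrative sketch with hypothetical names; send_wakeup_ipi and the simplified prototype stand in for the real primitives):

    typedef struct cpu_data cpu_data_t;

    extern int  pm_cpu_exit_idle_sketch(cpu_data_t *cpu);  /* nonzero => IPI still needed */
    extern void send_wakeup_ipi(int cpu_id);               /* hypothetical IPI primitive */

    static void
    wake_idle_cpu_sketch(cpu_data_t *cpu, int cpu_id)
    {
        /* Interrupt the CPU only if power management cannot wake it itself. */
        if (pm_cpu_exit_idle_sketch(cpu))
            send_wakeup_ipi(cpu_id);
    }
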
 /*
- * Force the CPU package to the lowest power level.  This is a low-level
- * interface meant to be called from the panic or debugger code to bring
- * the CPU to a safe power level for unmanaged operation.
- *
- * Note that while this will bring an entire package to a safe level, it
- * cannot affect other packages.  As a general rule, this should be run on
- * every code as part of entering the debugger or on the panic path.
+ * Called to save the timer state used by power management prior
+ * to "sleeping".
  */
 void
-pmsCPUYellowFlag(void)
+pmTimerSave(void)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPUYellowFlag != NULL)
-		(*pmDispatch->pmsCPUYellowFlag)();
+	if (pmDispatch != NULL
+	    && pmDispatch->pmTimerStateSave != NULL)
+		(*pmDispatch->pmTimerStateSave)();
 }

 /*
- * Restore the CPU to the power state it was in before a yellow flag.
+ * Called to restore the timer state used by power management after
+ * waking from "sleep".
  */
 void
-pmsCPUGreenFlag(void)
+pmTimerRestore(void)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPUGreenFlag != NULL)
-		(*pmDispatch->pmsCPUGreenFlag)();
+	if (pmDispatch != NULL
+	    && pmDispatch->pmTimerStateRestore != NULL)
+		(*pmDispatch->pmTimerStateRestore)();
 }

 /*
- * Load a new ratio/VID table.
- *
- * Note that this interface is specific to the Intel SpeedStep implementation.
- * It is expected that this will only be called once to override the default
- * ratio/VID table when the platform starts.
- *
- * Normally, the table will need to be replaced at the same time that the
- * stepper program proper is replaced, as the PState indices from an old
- * program may no longer be valid.  When replacing the default program this
- * should not be a problem as any new table will have at least two PState
- * entries and the default program only references P0 and P1.
+ * Set the worst-case time for the C4 to C2 transition.
+ * No longer does anything.
  */
-kern_return_t
-pmsCPULoadVIDTable(uint16_t *tablep, int nstates)
+void
+ml_set_maxsnoop(__unused uint32_t maxdelay)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPULoadVIDTable != NULL)
-		return((*pmDispatch->pmsCPULoadVIDTable)(tablep, nstates));
-	else {
-		int	i;
-
-		if (nstates > MAX_PSTATES)
-			return(KERN_FAILURE);
-
-		for (i = 0; i < nstates; i += 1)
-			pmInitState.VIDTable[i] = tablep[i];
-	}
-	return(KERN_SUCCESS);
 }

+
 /*
- * Set the (global) PState limit.  CPUs will not be permitted to run at
- * a lower (more performant) PState than this.
+ * Get the worst-case time for the C4 to C2 transition.  Returns nanoseconds.
  */
-kern_return_t
-pmsCPUSetPStateLimit(uint32_t limit)
+unsigned
+ml_get_maxsnoop(void)
 {
-	if (pmDispatch != NULL && pmDispatch->pmsCPUSetPStateLimit != NULL)
-		return((*pmDispatch->pmsCPUSetPStateLimit)(limit));
+	uint64_t	max_snoop = 0;

-	pmInitState.PLimit = limit;
-	return(KERN_SUCCESS);
+	if (pmDispatch != NULL
+	    && pmDispatch->getMaxSnoop != NULL)
+		max_snoop = pmDispatch->getMaxSnoop();
+
+	return((unsigned)(max_snoop & 0xffffffff));
+}
+
+
+uint32_t
+ml_get_maxbusdelay(void)
+{
+	uint64_t	max_delay = 0;
+
+	if (pmDispatch != NULL
+	    && pmDispatch->getMaxBusDelay != NULL)
+		max_delay = pmDispatch->getMaxBusDelay();
+
+	return((uint32_t)(max_delay & 0xffffffff));
 }
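
ml_get_maxbusdelay() above and ml_set_maxbusdelay() just below expose the bus-snoop latency constraint: a driver that cannot tolerate long memory-bus stalls lowers the limit, and the kext then avoids any C-state whose exit latency exceeds it. A hypothetical driver might constrain it like this (illustrative sketch; the 40us figure is an assumed device tolerance, not from the source):

    #include <stdint.h>

    extern uint32_t ml_get_maxbusdelay(void);
    extern void     ml_set_maxbusdelay(uint32_t mdelay);

    #define MY_DEV_MAX_BUS_DELAY_NS 40000u   /* hypothetical 40us tolerance */

    static void
    my_dev_constrain_cstates_sketch(void)
    {
        /* Tighten the system-wide limit only if it is currently looser. */
        if (ml_get_maxbusdelay() > MY_DEV_MAX_BUS_DELAY_NS)
            ml_set_maxbusdelay(MY_DEV_MAX_BUS_DELAY_NS);
    }
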
 /*
- * Initialize the Cstate change code.
+ * Set the maximum delay time allowed for snoop on the bus.
+ *
+ * Note that this value will be compared to the amount of time that it takes
+ * to transition from a non-snooping power state (C4) to a snooping state (C2).
+ * If maxBusDelay is less than C4C2SnoopDelay,
+ * we will not enter the lowest power state.
  */
 void
-power_management_init(void)
+ml_set_maxbusdelay(uint32_t mdelay)
 {
-	uint32_t	cpuModel;
-	uint32_t	cpuFamily;
-	uint32_t	xcpuid[4];
+	uint64_t	maxdelay = mdelay;

-	/*
-	 * Initialize the lock for the KEXT initialization.
-	 */
-	simple_lock_init(&pm_init_lock, 0);
+	if (pmDispatch != NULL
+	    && pmDispatch->setMaxBusDelay != NULL)
+		pmDispatch->setMaxBusDelay(maxdelay);
+}

-	/*
-	 * XXX
-	 *
-	 * The following is a hack to disable power management on some systems
-	 * until the KEXT is done.  This is strictly temporary!!!
-	 */
-	do_cpuid(1, xcpuid);
-	cpuFamily = (xcpuid[eax] >> 8) & 0xf;
-	cpuModel  = (xcpuid[eax] >> 4) & 0xf;
+uint64_t
+ml_get_maxintdelay(void)
+{
+	uint64_t	max_delay = 0;

-	if (cpuFamily != 0x6 || cpuModel < 0xe)
-		pmDispatch = NULL;
+	if (pmDispatch != NULL
+	    && pmDispatch->getMaxIntDelay != NULL)
+		max_delay = pmDispatch->getMaxIntDelay();

-	if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
-		(*pmDispatch->cstateInit)();
+	return(max_delay);
 }

 /*
- * This function will update the system nap policy.  It should be called
- * whenever conditions change: when the system is ready to being napping
- * and if something changes the rules (e.g. a sysctl altering the policy
- * for debugging).
+ * Set the maximum delay allowed for an interrupt.
  */
 void
-machine_nap_policy(void)
+ml_set_maxintdelay(uint64_t mdelay)
 {
-	if (pmDispatch != NULL && pmDispatch->cstateNapPolicy != NULL)
-		napCtl = (*pmDispatch->cstateNapPolicy)(forcenap, napCtl);
+	if (pmDispatch != NULL
+	    && pmDispatch->setMaxIntDelay != NULL)
+		pmDispatch->setMaxIntDelay(mdelay);
 }

 /*
- * ACPI calls the following routine to set/update mwait hints.  A table
- * (possibly null) specifies the available Cstates and their hints, all
- * other states are assumed to be invalid.  ACPI may update available
- * states to change the nap policy (for example, while AC power is
- * available).
+ * Put a CPU into "safe" mode with respect to power.
+ *
+ * Some systems cannot operate at a continuous "normal" speed without
+ * exceeding the thermal design.  This is called per-CPU to place the
+ * CPUs into a "safe" operating mode.
  */
-kern_return_t
-Cstate_table_set(Cstate_hint_t *tablep, unsigned int nstates)
+void
+pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
 {
-	if (forcenap)
-		return(KERN_SUCCESS);
-
-	if (pmDispatch != NULL && pmDispatch->cstateTableSet != NULL)
-		return((*pmDispatch->cstateTableSet)(tablep, nstates));
+	if (pmDispatch != NULL
+	    && pmDispatch->pmCPUSafeMode != NULL)
+		pmDispatch->pmCPUSafeMode(lcpu, flags);
 	else {
-		unsigned int i;
-
-		for (i = 0; i < nstates; i += 1) {
-			pmInitState.CStates[i].number = tablep[i].number;
-			pmInitState.CStates[i].hint = tablep[i].hint;
+		/*
+		 * Do something reasonable if the KEXT isn't present.
+		 *
+		 * We only look at the PAUSE and RESUME flags.  The other flag(s)
+		 * will not make any sense without the KEXT, so just ignore them.
+		 *
+		 * We set the CPU's state to indicate that it's halted.  If this
+		 * is the CPU we're currently running on, then spin until the
+		 * state becomes non-halted.
+		 */
+		if (flags & PM_SAFE_FL_PAUSE) {
+			lcpu->state = LCPU_PAUSE;
+			if (lcpu == x86_lcpu()) {
+				while (lcpu->state == LCPU_PAUSE)
+					cpu_pause();
+			}
+		}
+
+		/*
+		 * Clear the halted flag for the specified CPU; that will
+		 * get it out of its spin loop.
+		 */
+		if (flags & PM_SAFE_FL_RESUME) {
+			lcpu->state = LCPU_RUN;
 		}
+	}
+}
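
pmSafeMode() above is driven by paired PAUSE/RESUME flags: the fallback path parks a CPU by spinning on its own lcpu state field until some other CPU stores LCPU_RUN back into it. A debugger-style caller would pair the flags across all other CPUs roughly like this (illustrative sketch; the flag values and the real_ncpus loop are assumptions, not taken from this diff):

    #include <stdint.h>

    typedef struct x86_lcpu x86_lcpu_t;

    #define PM_SAFE_FL_PAUSE  0x10   /* assumed values; see the real pmCPU.h */
    #define PM_SAFE_FL_RESUME 0x20

    extern void        pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags);
    extern x86_lcpu_t *cpu_to_lcpu(int cpu);
    extern int         real_ncpus;   /* assumed count of configured CPUs */

    /* Park every other CPU on debugger entry; release them on exit. */
    static void
    debugger_safe_mode_sketch(int self, int entering)
    {
        uint32_t flags = entering ? PM_SAFE_FL_PAUSE : PM_SAFE_FL_RESUME;
        int      cpu;

        for (cpu = 0; cpu < real_ncpus; cpu++) {
            if (cpu != self)
                pmSafeMode(cpu_to_lcpu(cpu), flags);
        }
    }
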
+
+static uint32_t	saved_run_count = 0;

-	pmInitState.CStatesCount = nstates;
+void
+machine_run_count(uint32_t count)
+{
+	if (pmDispatch != NULL
+	    && pmDispatch->pmSetRunCount != NULL)
+		pmDispatch->pmSetRunCount(count);
+	else
+		saved_run_count = count;
+}
+
+boolean_t
+machine_processor_is_inactive(processor_t processor)
+{
+	int	cpu = processor->cpu_id;
+
+	if (pmDispatch != NULL
+	    && pmDispatch->pmIsCPUUnAvailable != NULL)
+		return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
+	else
+		return(FALSE);
+}
+
+processor_t
+machine_choose_processor(processor_set_t pset,
+			 processor_t preferred)
+{
+	int	startCPU;
+	int	endCPU;
+	int	preferredCPU;
+	int	chosenCPU;
+
+	if (!pmInitDone)
+		return(preferred);
+
+	if (pset == NULL) {
+		startCPU = -1;
+		endCPU = -1;
+	} else {
+		startCPU = pset->cpu_set_low;
+		endCPU = pset->cpu_set_hi;
 	}
-	return(KERN_SUCCESS);
+
+	if (preferred == NULL)
+		preferredCPU = -1;
+	else
+		preferredCPU = preferred->cpu_id;
+
+	if (pmDispatch != NULL
+	    && pmDispatch->pmChooseCPU != NULL) {
+		chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);
+
+		if (chosenCPU == -1)
+			return(NULL);
+		return(cpu_datap(chosenCPU)->cpu_processor);
+	}
+
+	return(preferred);
 }

-static inline void
-sti(void) {
-	__asm__ volatile ( "sti" : : : "memory");
+static uint32_t
+pmGetSavedRunCount(void)
+{
+	return(saved_run_count);
 }

 /*
- * Called when the CPU is idle.  It will choose the best C state to
- * be in.
+ * Returns the root of the package tree.
  */
-void
-machine_idle_cstate(void)
+static x86_pkg_t *
+pmGetPkgRoot(void)
 {
-	if (pmDispatch != NULL && pmDispatch->cstateMachineIdle != NULL)
-		(*pmDispatch->cstateMachineIdle)(napCtl);
-	else {
-		sti();
-	}
+	return(x86_pkgs);
 }

-static pmStats_t *
-pmsCPUStats(void)
+static boolean_t
+pmCPUGetHibernate(int cpu)
 {
-	cpu_data_t	*pp;
+	return(cpu_datap(cpu)->cpu_hibernate);
+}

-	pp = current_cpu_datap();
-	return(&pp->cpu_pmStats);
+static processor_t
+pmLCPUtoProcessor(int lcpu)
+{
+	return(cpu_datap(lcpu)->cpu_processor);
 }

-static pmsd *
-pmsCPUStepperData(void)
+static void
+pmReSyncDeadlines(int cpu)
 {
-	cpu_data_t	*pp;
+	static boolean_t	registered = FALSE;
+
+	if (!registered) {
+		PM_interrupt_register(&etimer_resync_deadlines);
+		registered = TRUE;
+	}
+
+	if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
+		etimer_resync_deadlines();
+	else
+		cpu_PM_interrupt(cpu);
+}

-	pp = current_cpu_datap();
-	return(&pp->pms);
+static void
+pmSendIPI(int cpu)
+{
+	lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
 }

-static uint64_t *
-CPUHPETAddr(void)
+static rtc_nanotime_t *
+pmGetNanotimeInfo(void)
 {
-	cpu_data_t	*pp;
-	pp = current_cpu_datap();
-	return(pp->cpu_pmHpet);
+	return(&rtc_nanotime_info);
 }
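
machine_choose_processor() above gives the kext veto power over thread placement: pmChooseCPU receives the pset's CPU range plus the scheduler's preference, and returns either a CPU number or -1 (which the wrapper maps to NULL, rejecting placement). A kext-side chooser that merely steers work away from one powered-down CPU might look like this (illustrative sketch; unavailable_cpu is hypothetical):

    static int unavailable_cpu = -1;   /* hypothetical: CPU the kext has taken down */

    /*
     * Return the CPU to run on, or -1 to reject placement.
     * startCPU/endCPU bound the processor set and may be -1 when unknown;
     * preferredCPU may be -1 when the scheduler has no preference.
     */
    static int
    pmChooseCPU_sketch(int startCPU, int endCPU, int preferredCPU)
    {
        int cpu;

        if (preferredCPU != -1 && preferredCPU != unavailable_cpu)
            return preferredCPU;              /* preference is acceptable */

        if (startCPU < 0)
            return preferredCPU;              /* no range to search */

        for (cpu = startCPU; cpu <= endCPU; cpu++) {
            if (cpu != unavailable_cpu)
                return cpu;                   /* first acceptable CPU in range */
        }
        return -1;                            /* nothing usable */
    }
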
 /*
  * Called by the power management kext to register itself and to get the
- * callbacks it might need into other power management functions.
+ * callbacks it might need into other kernel functions.  This interface
+ * is versioned to allow for slight mismatches between the kext and the
+ * kernel.
  */
 void
-pmRegister(pmDispatch_t *cpuFuncs, pmCallBacks_t *callbacks)
-{
-	if (callbacks != NULL) {
-		callbacks->Park        = pmsPark;
-		callbacks->Run         = pmsRun;
-		callbacks->RunLocal    = pmsRunLocal;
-		callbacks->SetStep     = pmsSetStep;
-		callbacks->NapPolicy   = machine_nap_policy;
-		callbacks->Build       = pmsBuild;
-		callbacks->Stats       = pmsCPUStats;
-		callbacks->StepperData = pmsCPUStepperData;
-		callbacks->HPETAddr    = CPUHPETAddr;
-		callbacks->InitState   = &pmInitState;
-		callbacks->resetPop    = resetPop;
+pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
+	       pmCallBacks_t *callbacks)
+{
+	if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
+		callbacks->setRTCPop        = setPop;
+		callbacks->resyncDeadlines  = pmReSyncDeadlines;
+		callbacks->initComplete     = pmInitComplete;
+		callbacks->GetLCPU          = pmGetLogicalCPU;
+		callbacks->GetCore          = pmGetCore;
+		callbacks->GetDie           = pmGetDie;
+		callbacks->GetPackage       = pmGetPackage;
+		callbacks->GetMyLCPU        = pmGetMyLogicalCPU;
+		callbacks->GetMyCore        = pmGetMyCore;
+		callbacks->GetMyDie         = pmGetMyDie;
+		callbacks->GetMyPackage     = pmGetMyPackage;
+		callbacks->GetPkgRoot       = pmGetPkgRoot;
+		callbacks->LockCPUTopology  = pmLockCPUTopology;
+		callbacks->GetHibernate     = pmCPUGetHibernate;
+		callbacks->LCPUtoProcessor  = pmLCPUtoProcessor;
+		callbacks->ThreadBind       = thread_bind;
+		callbacks->GetSavedRunCount = pmGetSavedRunCount;
+		callbacks->pmSendIPI        = pmSendIPI;
+		callbacks->GetNanotimeInfo  = pmGetNanotimeInfo;
+		callbacks->RTCClockAdjust   = rtc_clock_adjust;
+		callbacks->topoParms        = &topoParms;
+	} else {
+		panic("Version mis-match between Kernel and CPU PM");
 	}

-	if (cpuFuncs != NULL)
+	if (cpuFuncs != NULL) {
 		pmDispatch = cpuFuncs;
+
+		if (pmDispatch->pmIPIHandler != NULL) {
+			lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
+		}
+	}
 }

 /*
@@ -397,79 +702,47 @@ pmRegister(pmDispatch_t *cpuFuncs, pmCallBacks_t *callbacks)
  */
 void
 pmUnRegister(pmDispatch_t *cpuFuncs)
 {
-	if (cpuFuncs != NULL && pmDispatch == cpuFuncs)
+	if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
 		pmDispatch = NULL;
+	}
 }
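
pmKextRegister() above is the rendezvous between kernel and kext: the kext passes a version-checked pmDispatch_t and receives the kernel's pmCallBacks_t in return (a version mismatch panics). From the kext side, registration would look roughly like this (illustrative sketch; only dispatch fields named in this file are shown, the handler bodies are elided, and the declarations are assumed to come from the pmCPU.h header):

    /* Assumes the pmDispatch_t/pmCallBacks_t declarations are in scope. */
    static void     my_MachineIdle(uint64_t max_idle) { (void)max_idle; /* pick a C-state */ }
    static uint64_t my_GetDeadline(x86_lcpu_t *lcpu)  { (void)lcpu; return 0; }

    static pmDispatch_t  my_dispatch;    /* zero-initialized; unset entries stay NULL */
    static pmCallBacks_t my_callbacks;

    static void
    register_with_kernel_sketch(void)
    {
        my_dispatch.MachineIdle = my_MachineIdle;
        my_dispatch.GetDeadline = my_GetDeadline;

        /* Panics with "Version mis-match..." if the versions disagree. */
        pmKextRegister(PM_DISPATCH_VERSION, &my_dispatch, &my_callbacks);

        /* Kernel services are now reachable, e.g. my_callbacks.GetPkgRoot(). */
    }
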
-#if	MACH_KDB
-/*
- * XXX stubs for now
- */
-void
-db_cfg(__unused db_expr_t addr,
-       __unused int have_addr,
-       __unused db_expr_t count,
-       __unused char *modif)
-{
-	return;
-}
-
-void
-db_display_iokit(__unused db_expr_t addr,
-		 __unused int have_addr,
-		 __unused db_expr_t count,
-		 __unused char *modif)
-{
-	return;
-}
-
-void
-db_dtimers(__unused db_expr_t addr,
-	   __unused int have_addr,
-	   __unused db_expr_t count,
-	   __unused char *modif)
+/******************************************************************************
+ *
+ * All of the following are deprecated interfaces and no longer used.
+ *
+ ******************************************************************************/
+kern_return_t
+pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
+	   __unused uint32_t reqsize)
 {
-	return;
+	return(KERN_SUCCESS);
 }

 void
-db_intcnt(__unused db_expr_t addr,
-	  __unused int have_addr,
-	  __unused db_expr_t count,
-	  __unused char *modif)
+pmsInit(void)
 {
-	return;
 }

 void
-db_nap(__unused db_expr_t addr,
-       __unused int have_addr,
-       __unused db_expr_t count,
-       __unused char *modif)
+pmsStart(void)
 {
-	return;
 }

 void
-db_pmgr(__unused db_expr_t addr,
-	__unused int have_addr,
-	__unused db_expr_t count,
-	__unused char *modif)
+pmsPark(void)
 {
-	return;
 }

 void
-db_test(__unused db_expr_t addr,
-	__unused int have_addr,
-	__unused db_expr_t count,
-	__unused char *modif)
+pmsRun(__unused uint32_t nstep)
 {
-	return;
 }

-void
-db_getpmgr(__unused pmData_t *pmj)
+kern_return_t
+pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
+	 __unused pmsSetFunc_t *functab,
+	 __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
 {
+	return(KERN_SUCCESS);
 }
-#endif