/*
 * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */

#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock.h>
#include <kern/sched_prim.h>
#include <i386/lapic.h>

extern int disableConsoleOutput;

decl_simple_lock_data(,pm_init_lock);

/*
 * The following is set when the KEXT loads and initializes.
 */
pmDispatch_t	*pmDispatch	= NULL;

static uint32_t	pmInitDone	= 0;

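/*
 * Each wrapper below follows the same pattern: verify that pmDispatch
 * (and, where start-up ordering matters, pmInitDone) is set and that the
 * specific function pointer is non-NULL before calling into the KEXT;
 * otherwise fall back to a simple default behavior.
 */
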
/*
 * Initialize the Cstate change code.
 */
void
power_management_init(void)
{
    static boolean_t	initialized = FALSE;

    /*
     * Initialize the lock for the KEXT initialization.
     */
    if (!initialized) {
        simple_lock_init(&pm_init_lock, 0);
        initialized = TRUE;
    }

    if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
        (*pmDispatch->cstateInit)();
}

/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t	*my_cpu = current_cpu_datap();

    if (my_cpu == NULL)
        goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->MachineIdle != NULL)
        (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
        /*
         * If no power management, re-enable interrupts and halt.
         * This will keep the CPU from spinning through the scheduler
         * and will allow at least some minimal power savings (but it
         * can cause problems in some MP configurations w.r.t. the APIC
         * stopping during a GV3 transition).
         */
        __asm__ volatile ("sti; hlt");
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
out:
    __asm__ volatile ("sti");
}

/*
 * Called when the CPU is to be halted.  It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t	*cpup = current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("wbinvd; hlt");
        break;

    case PM_HALT_PANIC:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("cli; wbinvd; hlt");
        break;

    case PM_HALT_NORMAL:
    default:
        __asm__ volatile ("cli");

        if (pmInitDone
            && pmDispatch != NULL
            && pmDispatch->pmCPUHalt != NULL) {
            /*
             * Halt the CPU (and put it in a low power state).
             */
            (*pmDispatch->pmCPUHalt)();

            /*
             * We've exited halt, so get the CPU schedulable again.
             */
            i386_init_slave_fast();

            panic("init_slave_fast returned");
        } else {
            /*
             * If no power management and a processor is taken off-line,
             * then invalidate the cache and halt it (it will not be able
             * to be brought back on-line without resetting the CPU).
             */
            __asm__ volatile ("wbinvd");
            cpup->lcpu.state = LCPU_HALT;
            __asm__ volatile ("wbinvd; hlt");

            panic("back from Halt");
        }
        break;
    }
}

void
pmMarkAllCPUsOff(void)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markAllCPUsOff != NULL)
        (*pmDispatch->markAllCPUsOff)();
}

/*
 * Called (indirectly, as the initComplete callback registered below) by
 * the KEXT once it has finished initializing.
 */
static void
pmInitComplete(void)
{
    pmInitDone = 1;
}

static x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
    return(cpu_to_lcpu(cpu));
}

static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(&cpup->lcpu);
}

static x86_core_t *
pmGetCore(int cpu)
{
    return(cpu_to_core(cpu));
}

static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(cpup->lcpu.core);
}

static x86_die_t *
pmGetDie(int cpu)
{
    return(cpu_to_die(cpu));
}

static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(cpup->lcpu.die);
}

static x86_pkg_t *
pmGetPackage(int cpu)
{
    return(cpu_to_package(cpu));
}

static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(cpup->lcpu.package);
}

static void
pmLockCPUTopology(int lock)
{
    if (lock) {
        simple_lock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
    }
}

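/*
 * The static topology accessors above are not called elsewhere in the
 * kernel; they exist to be handed to the power management KEXT through
 * the callback table filled in by pmKextRegister() below.
 */
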
/*
 * Called to get the next deadline that has been set by the
 * power management code.
 */
uint64_t
pmCPUGetDeadline(cpu_data_t *cpu)
{
    uint64_t	deadline = EndOfAllTime;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->GetDeadline != NULL)
        deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);

    return(deadline);
}

/*
 * Called to determine if the supplied deadline or the power management
 * deadline is sooner.  Returns whichever one is first.
 */
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->SetDeadline != NULL)
        deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

    return(deadline);
}

/*
 * Called when a power management deadline expires.
 */
void
pmCPUDeadline(cpu_data_t *cpu)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->Deadline != NULL)
        (*pmDispatch->Deadline)(&cpu->lcpu);
}

/*
 * Called to get a CPU out of idle.
 */
boolean_t
pmCPUExitIdle(cpu_data_t *cpu)
{
    boolean_t	do_ipi;

    cpu->lcpu.state = LCPU_RUN;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitIdle != NULL)
        do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
    else
        do_ipi = TRUE;

    return(do_ipi);
}

kern_return_t
pmCPUExitHalt(int cpu)
{
    kern_return_t	rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHalt != NULL)
        rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));

    return(rc);
}

kern_return_t
pmCPUExitHaltToOff(int cpu)
{
    kern_return_t	rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHaltToOff != NULL)
        rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu));

    return(rc);
}

/*
 * Called to initialize the power management structures for the CPUs.
 */
void
pmCPUStateInit(void)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
        (*pmDispatch->pmCPUStateInit)();
}

/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t	*cpup = current_cpu_datap();

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markCPURunning != NULL)
        (*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
        cpup->lcpu.state = LCPU_RUN;
}

/*
 * Called to get/set CPU power management state.
 */
int
pmCPUControl(uint32_t cmd, void *datap)
{
    int		rc = -1;

    if (pmDispatch != NULL
        && pmDispatch->pmCPUControl != NULL)
        rc = (*pmDispatch->pmCPUControl)(cmd, datap);

    return(rc);
}

/*
 * Called to save the timer state used by power management prior
 * to "sleeping".
 */
void
pmTimerSave(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateSave != NULL)
        (*pmDispatch->pmTimerStateSave)();
}

/*
 * Called to restore the timer state used by power management after
 * waking from "sleep".
 */
void
pmTimerRestore(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateRestore != NULL)
        (*pmDispatch->pmTimerStateRestore)();
}

/*
 * Set the worst-case time for the C4 to C2 transition.
 * No longer does anything.
 */
void
ml_set_maxsnoop(__unused uint32_t maxdelay)
{
}

/*
 * Get the worst-case time for the C4 to C2 transition.  Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
    uint64_t	max_snoop = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxSnoop != NULL)
        max_snoop = pmDispatch->getMaxSnoop();

    return((unsigned)(max_snoop & 0xffffffff));
}

uint32_t
ml_get_maxbusdelay(void)
{
    uint64_t	max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxBusDelay != NULL)
        max_delay = pmDispatch->getMaxBusDelay();

    return((uint32_t)(max_delay & 0xffffffff));
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
    uint64_t	maxdelay = mdelay;

    if (pmDispatch != NULL
        && pmDispatch->setMaxBusDelay != NULL)
        pmDispatch->setMaxBusDelay(maxdelay);
}

uint64_t
ml_get_maxintdelay(void)
{
    uint64_t	max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxIntDelay != NULL)
        max_delay = pmDispatch->getMaxIntDelay();

    return(max_delay);
}

/*
 * Set the maximum delay allowed for an interrupt.
 */
void
ml_set_maxintdelay(uint64_t mdelay)
{
    if (pmDispatch != NULL
        && pmDispatch->setMaxIntDelay != NULL)
        pmDispatch->setMaxIntDelay(mdelay);
}

/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design.  This is called per-CPU to place the
 * CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
        && pmDispatch->pmCPUSafeMode != NULL)
        pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
        /*
         * Do something reasonable if the KEXT isn't present.
         *
         * We only look at the PAUSE and RESUME flags.  The other flag(s)
         * will not make any sense without the KEXT, so just ignore them.
         *
         * We set the CPU's state to indicate that it's halted.  If this
         * is the CPU we're currently running on, then spin until the
         * state becomes non-halted.
         */
        if (flags & PM_SAFE_FL_PAUSE) {
            lcpu->state = LCPU_PAUSE;
            if (lcpu == x86_lcpu()) {
                while (lcpu->state == LCPU_PAUSE)
                    cpu_pause();
            }
        }

        /*
         * Clear the halted flag for the specified CPU; that will
         * get it out of its spin loop.
         */
        if (flags & PM_SAFE_FL_RESUME) {
            lcpu->state = LCPU_RUN;
        }
    }
}

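/*
 * Illustrative (hypothetical) use of the fallback path above: a CPU that
 * pauses itself with
 *
 *	pmSafeMode(x86_lcpu(), PM_SAFE_FL_PAUSE);
 *
 * spins until some other CPU releases it with
 *
 *	pmSafeMode(lcpu, PM_SAFE_FL_RESUME);
 */
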
static uint32_t	saved_run_count = 0;

void
machine_run_count(uint32_t count)
{
    if (pmDispatch != NULL
        && pmDispatch->pmSetRunCount != NULL)
        pmDispatch->pmSetRunCount(count);
    else
        saved_run_count = count;
}

boolean_t
machine_processor_is_inactive(processor_t processor)
{
    int		cpu = processor->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmIsCPUUnAvailable != NULL)
        return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
    else
        return(FALSE);
}

processor_t
machine_choose_processor(processor_set_t pset,
                         processor_t preferred)
{
    int		startCPU;
    int		endCPU;
    int		preferredCPU;
    int		chosenCPU;

    if (!pmInitDone)
        return(preferred);

    if (pset == NULL) {
        startCPU = -1;
        endCPU = -1;
    } else {
        startCPU = pset->cpu_set_low;
        endCPU = pset->cpu_set_hi;
    }

    if (preferred == NULL)
        preferredCPU = -1;
    else
        preferredCPU = preferred->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmChooseCPU != NULL) {
        chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);

        if (chosenCPU == -1)
            return(NULL);
        return(cpu_datap(chosenCPU)->cpu_processor);
    }

    return(preferred);
}

static uint32_t
pmGetSavedRunCount(void)
{
    return(saved_run_count);
}

/*
 * Returns the root of the package tree.
 */
static x86_pkg_t *
pmGetPkgRoot(void)
{
    return(x86_pkgs);
}

static boolean_t
pmCPUGetHibernate(int cpu)
{
    return(cpu_datap(cpu)->cpu_hibernate);
}

static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return(cpu_datap(lcpu)->cpu_processor);
}

static void
pmReSyncDeadlines(int cpu)
{
    static boolean_t	registered = FALSE;

    if (!registered) {
        PM_interrupt_register(&etimer_resync_deadlines);
        registered = TRUE;
    }

    if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
        etimer_resync_deadlines();
    else
        cpu_PM_interrupt(cpu);
}

static void
pmSendIPI(int cpu)
{
    lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}

static rtc_nanotime_t *
pmGetNanotimeInfo(void)
{
    return(&rtc_nanotime_info);
}

/*
 * Called by the power management kext to register itself and to get the
 * callbacks it might need into other kernel functions.  This interface
 * is versioned to allow for slight mismatches between the kext and the
 * kernel.
 */
void
pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
               pmCallBacks_t *callbacks)
{
    if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
        callbacks->setRTCPop        = setPop;
        callbacks->resyncDeadlines  = pmReSyncDeadlines;
        callbacks->initComplete     = pmInitComplete;
        callbacks->GetLCPU          = pmGetLogicalCPU;
        callbacks->GetCore          = pmGetCore;
        callbacks->GetDie           = pmGetDie;
        callbacks->GetPackage       = pmGetPackage;
        callbacks->GetMyLCPU        = pmGetMyLogicalCPU;
        callbacks->GetMyCore        = pmGetMyCore;
        callbacks->GetMyDie         = pmGetMyDie;
        callbacks->GetMyPackage     = pmGetMyPackage;
        callbacks->GetPkgRoot       = pmGetPkgRoot;
        callbacks->LockCPUTopology  = pmLockCPUTopology;
        callbacks->GetHibernate     = pmCPUGetHibernate;
        callbacks->LCPUtoProcessor  = pmLCPUtoProcessor;
        callbacks->ThreadBind       = thread_bind;
        callbacks->GetSavedRunCount = pmGetSavedRunCount;
        callbacks->pmSendIPI        = pmSendIPI;
        callbacks->GetNanotimeInfo  = pmGetNanotimeInfo;
        callbacks->RTCClockAdjust   = rtc_clock_adjust;
        callbacks->topoParms        = &topoParms;
    } else {
        panic("Version mis-match between Kernel and CPU PM");
    }

    if (cpuFuncs != NULL) {
        pmDispatch = cpuFuncs;

        if (pmDispatch->pmIPIHandler != NULL) {
            lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
        }
    }
}

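/*
 * Illustrative sketch (hypothetical, not part of this file): a power
 * management KEXT would typically register itself from its start routine
 * along these lines; "my_dispatch" and "my_callbacks" are invented names.
 *
 *	static pmDispatch_t	my_dispatch = {
 *	    // MachineIdle, GetDeadline, SetDeadline, ... as implemented
 *	};
 *	static pmCallBacks_t	my_callbacks;
 *
 *	pmKextRegister(PM_DISPATCH_VERSION, &my_dispatch, &my_callbacks);
 *	// ... finish internal setup using the returned callbacks ...
 *	(*my_callbacks.initComplete)();   // enables the pmInitDone paths above
 */
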
/*
 * Unregisters the power management functions from the kext.
 */
void
pmUnRegister(pmDispatch_t *cpuFuncs)
{
    if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
        pmDispatch = NULL;
    }
}

/******************************************************************************
 *
 * All of the following are deprecated interfaces and no longer used.
 *
 ******************************************************************************/

kern_return_t
pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
           __unused uint32_t reqsize)
{
    return(KERN_SUCCESS);
}

void
pmsRun(__unused uint32_t nstep)
{
}

kern_return_t
pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
         __unused pmsSetFunc_t *functab,
         __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
{
    return(KERN_SUCCESS);
}