/*
 * Copyright (c) 2004-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock_protos.h>
#include <kern/sched_prim.h>
#include <i386/lapic.h>
#include <i386/pal_routines.h>

#include <sys/kdebug.h>
extern int disableConsoleOutput;

#define DELAY_UNSET	0xFFFFFFFFFFFFFFFFULL
/*
 * The following is set when the KEXT loads and initializes.
 */
pmDispatch_t	*pmDispatch	= NULL;

static uint32_t		pmInitDone		= 0;
static boolean_t	earlyTopology		= FALSE;
static uint64_t		earlyMaxBusDelay	= DELAY_UNSET;
static uint64_t		earlyMaxIntDelay	= DELAY_UNSET;
/*
 * Initialize the Cstate change code.
 */
void
power_management_init(void)
{
    if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
	(*pmDispatch->cstateInit)();
}
#define CPU_ACTIVE_STAT_BIN_1	(500000)
#define CPU_ACTIVE_STAT_BIN_2	(2000000)
#define CPU_ACTIVE_STAT_BIN_3	(5000000)

#define CPU_IDLE_STAT_BIN_1	(500000)
#define CPU_IDLE_STAT_BIN_2	(2000000)
#define CPU_IDLE_STAT_BIN_3	(5000000)
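/*
 * These thresholds are compared against deltas of mach_absolute_time(),
 * so they are in absolute-time units, not nanoseconds.  machine_idle()
 * below uses them to keep a four-bucket histogram per CPU; the binning
 * reduces to (sketch):
 *
 *	if (rtime < CPU_ACTIVE_STAT_BIN_1)      bucket = 0;
 *	else if (rtime < CPU_ACTIVE_STAT_BIN_2) bucket = 1;
 *	else if (rtime < CPU_ACTIVE_STAT_BIN_3) bucket = 2;
 *	else                                    bucket = 3;
 */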
/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t	*my_cpu = current_cpu_datap();
    uint64_t	ctime, rtime, itime;

    ctime = mach_absolute_time();

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    rtime = ctime - my_cpu->cpu_ixtime;

    my_cpu->cpu_rtime_total += rtime;

    if (rtime < CPU_ACTIVE_STAT_BIN_1)
	my_cpu->cpu_rtimes[0]++;
    else if (rtime < CPU_ACTIVE_STAT_BIN_2)
	my_cpu->cpu_rtimes[1]++;
    else if (rtime < CPU_ACTIVE_STAT_BIN_3)
	my_cpu->cpu_rtimes[2]++;
    else
	my_cpu->cpu_rtimes[3]++;
    /*
     * Handle the case where ml_set_maxbusdelay() or ml_set_maxintdelay()
     * was called before the CPU PM kext was registered.  We do this here
     * because idle is where these values are first used: the idle-entry
     * decisions below depend on them.
     */
    if (earlyMaxBusDelay != DELAY_UNSET)
	ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));

    if (earlyMaxIntDelay != DELAY_UNSET)
	ml_set_maxintdelay(earlyMaxIntDelay);

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->MachineIdle != NULL)
	(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
	/*
	 * If there is no power management, re-enable interrupts and halt.
	 * This keeps the CPU from spinning through the scheduler and
	 * allows at least some minimal power savings (but it can cause
	 * problems in some MP configurations w.r.t. the APIC stopping
	 * during a GV3 transition).
	 */
	pal_hlt();

	/* Once woken, re-disable interrupts. */
	pal_cli();
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    uint64_t ixtime = my_cpu->cpu_ixtime = mach_absolute_time();
    itime = ixtime - ctime;

    my_cpu->lcpu.state = LCPU_RUN;

    if (itime < CPU_IDLE_STAT_BIN_1)
	my_cpu->cpu_itimes[0]++;
    else if (itime < CPU_IDLE_STAT_BIN_2)
	my_cpu->cpu_itimes[1]++;
    else if (itime < CPU_IDLE_STAT_BIN_3)
	my_cpu->cpu_itimes[2]++;
    else
	my_cpu->cpu_itimes[3]++;

    my_cpu->cpu_itime_total += itime;

    /*
     * Re-enable interrupts.
     */
    pal_sti();
}
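/*
 * Timeline of the accounting above (cpu_ixtime holds the timestamp of the
 * most recent idle exit, so rtime is the preceding run interval and itime
 * the idle interval just completed):
 *
 *	cpu_ixtime (prev)       ctime (idle entry)      cpu_ixtime (new)
 *	     |-------- rtime --------|-------- itime --------|
 *	             (running)                (idle)
 */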
/*
 * Called when the CPU is to be halted.  It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t	*cpup = current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
	cpup->lcpu.state = LCPU_PAUSE;
	pal_stop_cpu(FALSE);
	break;

    case PM_HALT_PANIC:
	cpup->lcpu.state = LCPU_PAUSE;
	pal_stop_cpu(TRUE);
	break;

    case PM_HALT_NORMAL:
    default:
	pal_cli();

	if (pmInitDone
	    && pmDispatch != NULL
	    && pmDispatch->pmCPUHalt != NULL) {
	    /*
	     * Halt the CPU (and put it in a low power state).
	     */
	    (*pmDispatch->pmCPUHalt)();

	    /*
	     * We've exited halt, so get the CPU schedulable again.
	     */
	    i386_init_slave_fast();

	    panic("init_slave_fast returned");
	} else {
	    /*
	     * If there is no power management and a processor is taken
	     * off-line, invalidate the cache and halt it (it will not be
	     * able to be brought back on-line without resetting the CPU).
	     */
	    __asm__ volatile ("wbinvd");
	    cpup->lcpu.state = LCPU_HALT;
	    pal_stop_cpu(FALSE);

	    panic("back from Halt");
	}
	break;
    }
}
void
pmMarkAllCPUsOff(void)
{
    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->markAllCPUsOff != NULL)
	(*pmDispatch->markAllCPUsOff)();
}
static void
pmInitComplete(void)
{
    if (earlyTopology
	&& pmDispatch != NULL
	&& pmDispatch->pmCPUStateInit != NULL) {
	(*pmDispatch->pmCPUStateInit)();
	earlyTopology = FALSE;
    }

    pmInitDone = 1;
}
static x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
    return(cpu_to_lcpu(cpu));
}
static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(&cpup->lcpu);
}
static x86_core_t *
pmGetCore(int cpu)
{
    return(cpu_to_core(cpu));
}
static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(cpup->lcpu.core);
}
static x86_die_t *
pmGetDie(int cpu)
{
    return(cpu_to_die(cpu));
}
static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(cpup->lcpu.die);
}
static x86_pkg_t *
pmGetPackage(int cpu)
{
    return(cpu_to_package(cpu));
}
static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t	*cpup = current_cpu_datap();

    return(cpup->lcpu.package);
}
static void
pmLockCPUTopology(int lock)
{
    if (lock) {
	simple_lock(&x86_topo_lock);
    } else {
	simple_unlock(&x86_topo_lock);
    }
}
/*
 * Called to get the next deadline that has been set by the
 * power management code.
 * Note: a return of 0 from AICPM and this routine signifies
 * that no deadline is set.
 */
uint64_t
pmCPUGetDeadline(cpu_data_t *cpu)
{
    uint64_t	deadline = 0;

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->GetDeadline != NULL)
	deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);

    return(deadline);
}
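/*
 * Hypothetical caller sketch: since 0 means "no deadline", callers must
 * treat it as a sentinel rather than as a time in the past.
 *
 *	uint64_t d = pmCPUGetDeadline(current_cpu_datap());
 *	if (d != 0)
 *		... arm a timer for absolute time d ...
 */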
/*
 * Called to determine if the supplied deadline or the power management
 * deadline is sooner.  Returns whichever one is first.
 */
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->SetDeadline != NULL)
	deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

    return(deadline);
}
/*
 * Called when a power management deadline expires.
 */
void
pmCPUDeadline(cpu_data_t *cpu)
{
    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->Deadline != NULL)
	(*pmDispatch->Deadline)(&cpu->lcpu);
}
/*
 * Called to get a CPU out of idle.
 */
boolean_t
pmCPUExitIdle(cpu_data_t *cpu)
{
    boolean_t	do_ipi;

    cpu->lcpu.state = LCPU_RUN;

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->exitIdle != NULL)
	do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
    else
	do_ipi = TRUE;

    return(do_ipi);
}
kern_return_t
pmCPUExitHalt(int cpu)
{
    kern_return_t	rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->exitHalt != NULL)
	rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));

    return(rc);
}
kern_return_t
pmCPUExitHaltToOff(int cpu)
{
    kern_return_t	rc = KERN_SUCCESS;

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->exitHaltToOff != NULL)
	rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu));

    return(rc);
}
/*
 * Called to initialize the power management structures for the CPUs.
 */
void
pmCPUStateInit(void)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
	(*pmDispatch->pmCPUStateInit)();
    else
	earlyTopology = TRUE;
}
/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t	*cpup = current_cpu_datap();

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->markCPURunning != NULL)
	(*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
	cpup->lcpu.state = LCPU_RUN;
}
/*
 * Called to get/set CPU power management state.
 */
int
pmCPUControl(uint32_t cmd, void *datap)
{
    int		rc = -1;

    if (pmDispatch != NULL
	&& pmDispatch->pmCPUControl != NULL)
	rc = (*pmDispatch->pmCPUControl)(cmd, datap);

    return(rc);
}
/*
 * Called to save the timer state used by power management prior
 * to "sleeping".
 */
void
pmTimerSave(void)
{
    if (pmDispatch != NULL
	&& pmDispatch->pmTimerStateSave != NULL)
	(*pmDispatch->pmTimerStateSave)();
}
/*
 * Called to restore the timer state used by power management after
 * waking from "sleep".
 */
void
pmTimerRestore(void)
{
    if (pmDispatch != NULL
	&& pmDispatch->pmTimerStateRestore != NULL)
	(*pmDispatch->pmTimerStateRestore)();
}
/*
 * Set the worst-case time for the C4 to C2 transition.
 * No longer does anything.
 */
void
ml_set_maxsnoop(__unused uint32_t maxdelay)
{
}
/*
 * Get the worst-case time for the C4 to C2 transition.  Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
    uint64_t	max_snoop = 0;

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->getMaxSnoop != NULL)
	max_snoop = pmDispatch->getMaxSnoop();

    return((unsigned)(max_snoop & 0xffffffff));
}
uint32_t
ml_get_maxbusdelay(void)
{
    uint64_t	max_delay = 0;

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->getMaxBusDelay != NULL)
	max_delay = pmDispatch->getMaxBusDelay();

    return((uint32_t)(max_delay & 0xffffffff));
}
/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
    uint64_t	maxdelay = mdelay;

    if (pmDispatch != NULL
	&& pmDispatch->setMaxBusDelay != NULL) {
	earlyMaxBusDelay = DELAY_UNSET;
	pmDispatch->setMaxBusDelay(maxdelay);
    } else
	earlyMaxBusDelay = maxdelay;
}
uint64_t
ml_get_maxintdelay(void)
{
    uint64_t	max_delay = 0;

    if (pmDispatch != NULL
	&& pmDispatch->getMaxIntDelay != NULL)
	max_delay = pmDispatch->getMaxIntDelay();

    return(max_delay);
}
/*
 * Set the maximum delay allowed for an interrupt.
 */
void
ml_set_maxintdelay(uint64_t mdelay)
{
    if (pmDispatch != NULL
	&& pmDispatch->setMaxIntDelay != NULL) {
	earlyMaxIntDelay = DELAY_UNSET;
	pmDispatch->setMaxIntDelay(mdelay);
    } else
	earlyMaxIntDelay = mdelay;
}
boolean_t
ml_get_interrupt_prewake_applicable(void)
{
    boolean_t	applicable = FALSE;

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->pmInterruptPrewakeApplicable != NULL)
	applicable = pmDispatch->pmInterruptPrewakeApplicable();

    return applicable;
}
/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design.  This is called per-CPU to place the
 * CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
	&& pmDispatch->pmCPUSafeMode != NULL)
	pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
	/*
	 * Do something reasonable if the KEXT isn't present.
	 *
	 * We only look at the PAUSE and RESUME flags.  The other flag(s)
	 * will not make any sense without the KEXT, so just ignore them.
	 *
	 * We set the CPU's state to indicate that it's halted.  If this
	 * is the CPU we're currently running on, then spin until the
	 * state becomes non-halted.
	 */
	if (flags & PM_SAFE_FL_PAUSE) {
	    lcpu->state = LCPU_PAUSE;
	    if (lcpu == x86_lcpu()) {
		while (lcpu->state == LCPU_PAUSE)
		    cpu_pause();
	    }
	}

	/*
	 * Clear the halted flag for the specified CPU, which will
	 * get it out of its spin loop.
	 */
	if (flags & PM_SAFE_FL_RESUME) {
	    lcpu->state = LCPU_RUN;
	}
    }
}
static uint32_t	saved_run_count = 0;

void
machine_run_count(uint32_t count)
{
    if (pmDispatch != NULL
	&& pmDispatch->pmSetRunCount != NULL)
	pmDispatch->pmSetRunCount(count);
    else
	saved_run_count = count;
}
boolean_t
machine_processor_is_inactive(processor_t processor)
{
    int		cpu = processor->cpu_id;

    if (pmDispatch != NULL
	&& pmDispatch->pmIsCPUUnAvailable != NULL)
	return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
    else
	return(FALSE);
}
processor_t
machine_choose_processor(processor_set_t pset,
			 processor_t preferred)
{
    int		startCPU;
    int		endCPU;
    int		preferredCPU;
    int		chosenCPU;

    if (!pmInitDone)
	return(preferred);

    if (pset == NULL) {
	startCPU = -1;
	endCPU = -1;
    } else {
	startCPU = pset->cpu_set_low;
	endCPU = pset->cpu_set_hi;
    }

    if (preferred == NULL)
	preferredCPU = -1;
    else
	preferredCPU = preferred->cpu_id;

    if (pmDispatch != NULL
	&& pmDispatch->pmChooseCPU != NULL) {
	chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);

	if (chosenCPU == -1)
	    return(NULL);
	return(cpu_datap(chosenCPU)->cpu_processor);
    }

    return(preferred);
}
static int
pmThreadGetUrgency(uint64_t *rt_period, uint64_t *rt_deadline)
{
    return(thread_get_urgency(rt_period, rt_deadline));
}
#if	DEBUG
uint32_t	urgency_stats[64][THREAD_URGENCY_MAX];
#endif

#define	URGENCY_NOTIFICATION_ASSERT_NS	(5 * 1000 * 1000)
uint64_t	urgency_notification_assert_abstime_threshold,
		urgency_notification_max_recorded;
void
thread_tell_urgency(int urgency,
		    uint64_t rt_period,
		    uint64_t rt_deadline)
{
	uint64_t	urgency_notification_time_start, delta;
	boolean_t	urgency_assert =
	    (urgency_notification_assert_abstime_threshold != 0);

	assert(get_preemption_level() > 0 || ml_get_interrupts_enabled() == FALSE);
#if	DEBUG
	urgency_stats[cpu_number() % 64][urgency]++;
#endif
	if (!pmInitDone
	    || pmDispatch == NULL
	    || pmDispatch->pmThreadTellUrgency == NULL)
		return;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, (rt_deadline >> 32), rt_deadline, 0);

	if (__improbable((urgency_assert == TRUE)))
		urgency_notification_time_start = mach_absolute_time();

	pmDispatch->pmThreadTellUrgency(urgency, rt_period, rt_deadline);

	if (__improbable((urgency_assert == TRUE))) {
		delta = mach_absolute_time() - urgency_notification_time_start;

		if (__improbable(delta > urgency_notification_max_recorded)) {
			/*
			 * This is not synchronized, but it doesn't matter
			 * if we (rarely) miss an event, as it is statistically
			 * unlikely that it will never recur.
			 */
			urgency_notification_max_recorded = delta;

			if (__improbable((delta > urgency_notification_assert_abstime_threshold) && !machine_timeout_suspended()))
				panic("Urgency notification callout %p exceeded threshold, 0x%llx abstime units", pmDispatch->pmThreadTellUrgency, delta);
		}
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, (rt_deadline >> 32), rt_deadline, 0);
}
void
active_rt_threads(boolean_t active)
{
    if (!pmInitDone
	|| pmDispatch == NULL
	|| pmDispatch->pmActiveRTThreads == NULL)
	return;

    pmDispatch->pmActiveRTThreads(active);
}
static uint32_t
pmGetSavedRunCount(void)
{
    return(saved_run_count);
}
/*
 * Returns the root of the package tree.
 */
static x86_pkg_t *
pmGetPkgRoot(void)
{
    return(x86_pkgs);
}
static boolean_t
pmCPUGetHibernate(int cpu)
{
    return(cpu_datap(cpu)->cpu_hibernate);
}
static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return(cpu_datap(lcpu)->cpu_processor);
}
static void
pmReSyncDeadlines(int cpu)
{
    static boolean_t	registered = FALSE;

    if (!registered) {
	PM_interrupt_register(&etimer_resync_deadlines);
	registered = TRUE;
    }

    if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
	etimer_resync_deadlines();
    else
	cpu_PM_interrupt(cpu);
}
static void
pmSendIPI(int cpu)
{
    lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}
static void
pmGetNanotimeInfo(pm_rtc_nanotime_t *rtc_nanotime)
{
	/*
	 * Make sure that nanotime didn't change while we were reading it.
	 */
	do {
		rtc_nanotime->generation = pal_rtc_nanotime_info.generation;	/* must be first */
		rtc_nanotime->tsc_base = pal_rtc_nanotime_info.tsc_base;
		rtc_nanotime->ns_base = pal_rtc_nanotime_info.ns_base;
		rtc_nanotime->scale = pal_rtc_nanotime_info.scale;
		rtc_nanotime->shift = pal_rtc_nanotime_info.shift;
	} while (pal_rtc_nanotime_info.generation != 0
	    && rtc_nanotime->generation != pal_rtc_nanotime_info.generation);
}
static uint32_t
pmTimerQueueMigrate(int target_cpu)
{
    /* Call the etimer code to do this. */
    return (target_cpu != cpu_number())
	? etimer_queue_migrate(target_cpu)
	: 0;
}
/*
 * Called by the power management kext to register itself and to get the
 * callbacks it might need into other kernel functions.  This interface
 * is versioned to allow for slight mis-matches between the kext and the
 * kernel.
 */
void
pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
	       pmCallBacks_t *callbacks)
{
    if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
	callbacks->setRTCPop            = setPop;
	callbacks->resyncDeadlines      = pmReSyncDeadlines;
	callbacks->initComplete         = pmInitComplete;
	callbacks->GetLCPU              = pmGetLogicalCPU;
	callbacks->GetCore              = pmGetCore;
	callbacks->GetDie               = pmGetDie;
	callbacks->GetPackage           = pmGetPackage;
	callbacks->GetMyLCPU            = pmGetMyLogicalCPU;
	callbacks->GetMyCore            = pmGetMyCore;
	callbacks->GetMyDie             = pmGetMyDie;
	callbacks->GetMyPackage         = pmGetMyPackage;
	callbacks->GetPkgRoot           = pmGetPkgRoot;
	callbacks->LockCPUTopology      = pmLockCPUTopology;
	callbacks->GetHibernate         = pmCPUGetHibernate;
	callbacks->LCPUtoProcessor      = pmLCPUtoProcessor;
	callbacks->ThreadBind           = thread_bind;
	callbacks->GetSavedRunCount     = pmGetSavedRunCount;
	callbacks->GetNanotimeInfo      = pmGetNanotimeInfo;
	callbacks->ThreadGetUrgency     = pmThreadGetUrgency;
	callbacks->RTCClockAdjust       = rtc_clock_adjust;
	callbacks->timerQueueMigrate    = pmTimerQueueMigrate;
	callbacks->topoParms            = &topoParms;
	callbacks->pmSendIPI            = pmSendIPI;
	callbacks->InterruptPending     = lapic_is_interrupt_pending;
	callbacks->IsInterrupting       = lapic_is_interrupting;
	callbacks->InterruptStats       = lapic_interrupt_counts;
	callbacks->DisableApicTimer     = lapic_disable_timer;
    } else {
	panic("Version mis-match between Kernel and CPU PM");
    }

    if (cpuFuncs != NULL) {
	pmDispatch = cpuFuncs;

	if (earlyTopology
	    && pmDispatch->pmCPUStateInit != NULL) {
	    (*pmDispatch->pmCPUStateInit)();
	    earlyTopology = FALSE;
	}

	if (pmDispatch->pmIPIHandler != NULL) {
	    lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
	}
    }
}
/*
 * Unregisters the power management functions from the kext.
 */
void
pmUnRegister(pmDispatch_t *cpuFuncs)
{
    if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
	pmDispatch = NULL;
    }
}
/******************************************************************************
 *
 * All of the following are deprecated interfaces and no longer used.
 *
 ******************************************************************************/
kern_return_t
pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
	   __unused uint32_t reqsize)
{
    return(KERN_SUCCESS);
}
void
pmsRun(__unused uint32_t nstep)
{
}
kern_return_t
pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
	 __unused pmsSetFunc_t *functab,
	 __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
{
    return(KERN_SUCCESS);
}
void machine_track_platform_idle(boolean_t entry) {
	cpu_data_t	*my_cpu = current_cpu_datap();

	if (entry) {
		(void)__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);
	} else {
		(void)__sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
	}
}
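/*
 * num_idle is shared by every core in the package, so the updates use the
 * GCC __sync atomics rather than plain ++/--.  A reader can then snapshot
 * package idle occupancy without a lock, e.g. (hypothetical):
 *
 *	uint32_t idle_now = my_cpu->lcpu.package->num_idle;
 */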