/*
 * Copyright (c) 2004-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */
#include <i386/asm.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock_protos.h>
#include <kern/sched_prim.h>
#include <i386/lapic.h>
#include <i386/pal_routines.h>

#include <sys/kdebug.h>

extern int disableConsoleOutput;

#define DELAY_UNSET     0xFFFFFFFFFFFFFFFFULL

/*
 * The following is set when the KEXT loads and initializes.
 */
pmDispatch_t *pmDispatch = NULL;

static uint32_t  pmInitDone       = 0;
static boolean_t earlyTopology    = FALSE;
static uint64_t  earlyMaxBusDelay = DELAY_UNSET;
static uint64_t  earlyMaxIntDelay = DELAY_UNSET;

/*
 * Initialize the Cstate change code.
 */
void
power_management_init(void)
{
    if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
        (*pmDispatch->cstateInit)();
}

#define CPU_ACTIVE_STAT_BIN_1   (500000)
#define CPU_ACTIVE_STAT_BIN_2   (2000000)
#define CPU_ACTIVE_STAT_BIN_3   (5000000)

#define CPU_IDLE_STAT_BIN_1     (500000)
#define CPU_IDLE_STAT_BIN_2     (2000000)
#define CPU_IDLE_STAT_BIN_3     (5000000)

/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t *my_cpu = current_cpu_datap();
    uint64_t    ctime, rtime, itime;

    if (my_cpu == NULL)
        goto out;

    ctime = mach_absolute_time();

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    rtime = ctime - my_cpu->cpu_ixtime;

    my_cpu->cpu_rtime_total += rtime;

    if (rtime < CPU_ACTIVE_STAT_BIN_1)
        my_cpu->cpu_rtimes[0]++;
    else if (rtime < CPU_ACTIVE_STAT_BIN_2)
        my_cpu->cpu_rtimes[1]++;
    else if (rtime < CPU_ACTIVE_STAT_BIN_3)
        my_cpu->cpu_rtimes[2]++;
    else
        my_cpu->cpu_rtimes[3]++;

    if (pmInitDone) {
        /*
         * Handle the case where ml_set_maxbusdelay() or ml_set_maxintdelay()
         * were called prior to the CPU PM kext being registered.  We do
         * this here since we know the values will first be used at this
         * point: idle is where the decisions using these values are made.
         */
        if (earlyMaxBusDelay != DELAY_UNSET)
            ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));

        if (earlyMaxIntDelay != DELAY_UNSET)
            ml_set_maxintdelay(earlyMaxIntDelay);
    }

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->MachineIdle != NULL)
        (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
        /*
         * If there is no power management, re-enable interrupts and halt.
         * This will keep the CPU from spinning through the scheduler
         * and will allow at least some minimal power savings (but it
         * may cause problems in some MP configurations w.r.t. the APIC
         * stopping during a GV3 transition).
         */
        pal_hlt();

        /* Once woken, re-disable interrupts. */
        pal_cli();
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);

    uint64_t ixtime = my_cpu->cpu_ixtime = mach_absolute_time();
    itime = ixtime - ctime;

    my_cpu->lcpu.state = LCPU_RUN;

    if (itime < CPU_IDLE_STAT_BIN_1)
        my_cpu->cpu_itimes[0]++;
    else if (itime < CPU_IDLE_STAT_BIN_2)
        my_cpu->cpu_itimes[1]++;
    else if (itime < CPU_IDLE_STAT_BIN_3)
        my_cpu->cpu_itimes[2]++;
    else
        my_cpu->cpu_itimes[3]++;

    my_cpu->cpu_itime_total += itime;

    /*
     * Re-enable interrupts.
     */
out:
    pal_sti();
}

/*
 * Called when the CPU is to be halted.  It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t *cpup = current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
        cpup->lcpu.state = LCPU_PAUSE;
        pal_stop_cpu(FALSE);
        break;

    case PM_HALT_PANIC:
        cpup->lcpu.state = LCPU_PAUSE;
        pal_stop_cpu(TRUE);
        break;

    case PM_HALT_NORMAL:
    default:
        pal_cli();

        if (pmInitDone
            && pmDispatch != NULL
            && pmDispatch->pmCPUHalt != NULL) {
            /*
             * Halt the CPU (and put it in a low power state).
             */
            (*pmDispatch->pmCPUHalt)();

            /*
             * We've exited halt, so get the CPU schedulable again.
             */
            i386_init_slave_fast();

            panic("init_slave_fast returned");
        } else {
            /*
             * If there is no power management and a processor is taken
             * off-line, invalidate the cache and halt it (it will not be
             * able to be brought back on-line without resetting the CPU).
             */
            __asm__ volatile ("wbinvd");
            cpup->lcpu.state = LCPU_HALT;
            pal_stop_cpu(FALSE);

            panic("back from Halt");
        }

        break;
    }
}

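/*
 * Asks the power management kext to mark all CPUs as being powered off
 * (e.g. when the system as a whole is going down).
 */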
void
pmMarkAllCPUsOff(void)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markAllCPUsOff != NULL)
        (*pmDispatch->markAllCPUsOff)();
}

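/*
 * Invoked by the power management kext (via the initComplete callback
 * registered below) once it has finished its initialization.
 */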
static void
pmInitComplete(void)
{
    if (earlyTopology
        && pmDispatch != NULL
        && pmDispatch->pmCPUStateInit != NULL) {
        (*pmDispatch->pmCPUStateInit)();
        earlyTopology = FALSE;
    }

    pmInitDone = 1;
}

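/*
 * Accessors handed to the power management kext for walking the CPU
 * topology: logical CPU, core, die, and package, plus the topology lock.
 */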
static x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
    return(cpu_to_lcpu(cpu));
}

static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(&cpup->lcpu);
}

static x86_core_t *
pmGetCore(int cpu)
{
    return(cpu_to_core(cpu));
}

static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.core);
}

static x86_die_t *
pmGetDie(int cpu)
{
    return(cpu_to_die(cpu));
}

static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.die);
}

static x86_pkg_t *
pmGetPackage(int cpu)
{
    return(cpu_to_package(cpu));
}

static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.package);
}

static void
pmLockCPUTopology(int lock)
{
    if (lock) {
        simple_lock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
    }
}

/*
 * Called to get the next deadline that has been set by the
 * power management code.
 * Note: a return of 0 from AICPM and this routine signifies
 * that no deadline is set.
 */
uint64_t
pmCPUGetDeadline(cpu_data_t *cpu)
{
    uint64_t deadline = 0;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->GetDeadline != NULL)
        deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);

    return(deadline);
}

/*
 * Called to determine whether the supplied deadline or the power management
 * deadline is sooner.  Returns whichever one is first.
 */
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->SetDeadline != NULL)
        deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

    return(deadline);
}

/*
 * Called when a power management deadline expires.
 */
void
pmCPUDeadline(cpu_data_t *cpu)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->Deadline != NULL)
        (*pmDispatch->Deadline)(&cpu->lcpu);
}

/*
 * Called to get a CPU out of idle.
 */
boolean_t
pmCPUExitIdle(cpu_data_t *cpu)
{
    boolean_t do_ipi;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitIdle != NULL)
        do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
    else
        do_ipi = TRUE;

    return(do_ipi);
}

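/*
 * Called to bring a CPU out of the halted state.  Returns
 * KERN_INVALID_ARGUMENT if the kext provides no way to do so.
 */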
kern_return_t
pmCPUExitHalt(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHalt != NULL)
        rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));

    return(rc);
}

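/*
 * Called to transition a CPU from the halted state to off (via the kext's
 * exitHaltToOff entry point, when present).
 */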
kern_return_t
pmCPUExitHaltToOff(int cpu)
{
    kern_return_t rc = KERN_SUCCESS;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHaltToOff != NULL)
        rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu));

    return(rc);
}

/*
 * Called to initialize the power management structures for the CPUs.
 */
void
pmCPUStateInit(void)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
        (*pmDispatch->pmCPUStateInit)();
    else
        earlyTopology = TRUE;
}

/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t *cpup = current_cpu_datap();

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markCPURunning != NULL)
        (*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
        cpup->lcpu.state = LCPU_RUN;
}

/*
 * Called to get/set CPU power management state.
 */
int
pmCPUControl(uint32_t cmd, void *datap)
{
    int rc = -1;

    if (pmDispatch != NULL
        && pmDispatch->pmCPUControl != NULL)
        rc = (*pmDispatch->pmCPUControl)(cmd, datap);

    return(rc);
}

/*
 * Called to save the timer state used by power management prior
 * to "sleeping".
 */
void
pmTimerSave(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateSave != NULL)
        (*pmDispatch->pmTimerStateSave)();
}

/*
 * Called to restore the timer state used by power management after
 * waking from "sleep".
 */
void
pmTimerRestore(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateRestore != NULL)
        (*pmDispatch->pmTimerStateRestore)();
}

/*
 * Set the worst-case time for the C4 to C2 transition.
 * No longer does anything.
 */
void
ml_set_maxsnoop(__unused uint32_t maxdelay)
{
}

/*
 * Get the worst-case time for the C4 to C2 transition.  Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
    uint64_t max_snoop = 0;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->getMaxSnoop != NULL)
        max_snoop = pmDispatch->getMaxSnoop();

    return((unsigned)(max_snoop & 0xffffffff));
}

uint32_t
ml_get_maxbusdelay(void)
{
    uint64_t max_delay = 0;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->getMaxBusDelay != NULL)
        max_delay = pmDispatch->getMaxBusDelay();

    return((uint32_t)(max_delay & 0xffffffff));
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
    uint64_t maxdelay = mdelay;

    if (pmDispatch != NULL
        && pmDispatch->setMaxBusDelay != NULL) {
        earlyMaxBusDelay = DELAY_UNSET;
        pmDispatch->setMaxBusDelay(maxdelay);
    } else
        earlyMaxBusDelay = maxdelay;
}

uint64_t
ml_get_maxintdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxIntDelay != NULL)
        max_delay = pmDispatch->getMaxIntDelay();

    return(max_delay);
}

/*
 * Set the maximum delay allowed for an interrupt.
 */
void
ml_set_maxintdelay(uint64_t mdelay)
{
    if (pmDispatch != NULL
        && pmDispatch->setMaxIntDelay != NULL) {
        earlyMaxIntDelay = DELAY_UNSET;
        pmDispatch->setMaxIntDelay(mdelay);
    } else
        earlyMaxIntDelay = mdelay;
}

boolean_t
ml_get_interrupt_prewake_applicable(void)
{
    boolean_t applicable = FALSE;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->pmInterruptPrewakeApplicable != NULL)
        applicable = pmDispatch->pmInterruptPrewakeApplicable();

    return applicable;
}

/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design.  This is called per-CPU to place the
 * CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
        && pmDispatch->pmCPUSafeMode != NULL)
        pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
        /*
         * Do something reasonable if the KEXT isn't present.
         *
         * We only look at the PAUSE and RESUME flags.  The other flag(s)
         * will not make any sense without the KEXT, so just ignore them.
         *
         * We set the CPU's state to indicate that it's halted.  If this
         * is the CPU we're currently running on, then spin until the
         * state becomes non-halted.
         */
        if (flags & PM_SAFE_FL_PAUSE) {
            lcpu->state = LCPU_PAUSE;
            if (lcpu == x86_lcpu()) {
                while (lcpu->state == LCPU_PAUSE)
                    cpu_pause();
            }
        }

        /*
         * Clear the halted flag for the specified CPU, which will
         * get it out of its spin loop.
         */
        if (flags & PM_SAFE_FL_RESUME) {
            lcpu->state = LCPU_RUN;
        }
    }
}

static uint32_t saved_run_count = 0;

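/*
 * Passes the scheduler's run count to the power management kext; if the
 * kext isn't registered yet, the count is saved and later retrieved via
 * the GetSavedRunCount callback.
 */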
void
machine_run_count(uint32_t count)
{
    if (pmDispatch != NULL
        && pmDispatch->pmSetRunCount != NULL)
        pmDispatch->pmSetRunCount(count);
    else
        saved_run_count = count;
}

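/*
 * Returns TRUE if the power management kext reports the processor's CPU
 * as unavailable; FALSE (available) when no kext is loaded.
 */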
boolean_t
machine_processor_is_inactive(processor_t processor)
{
    int cpu = processor->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmIsCPUUnAvailable != NULL)
        return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
    else
        return(FALSE);
}

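/*
 * Gives the power management kext a chance to bias the scheduler's choice
 * of CPU within the processor set; falls back to the scheduler's preferred
 * processor if the kext has no opinion or isn't initialized.
 */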
processor_t
machine_choose_processor(processor_set_t pset,
                         processor_t preferred)
{
    int startCPU;
    int endCPU;
    int preferredCPU;
    int chosenCPU;

    if (!pmInitDone)
        return(preferred);

    if (pset == NULL) {
        startCPU = -1;
        endCPU = -1;
    } else {
        startCPU = pset->cpu_set_low;
        endCPU = pset->cpu_set_hi;
    }

    if (preferred == NULL)
        preferredCPU = -1;
    else
        preferredCPU = preferred->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmChooseCPU != NULL) {
        chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);

        if (chosenCPU == -1)
            return(NULL);
        return(cpu_datap(chosenCPU)->cpu_processor);
    }

    return(preferred);
}

static int
pmThreadGetUrgency(uint64_t *rt_period, uint64_t *rt_deadline)
{
    return(thread_get_urgency(rt_period, rt_deadline));
}

#if DEBUG
uint32_t urgency_stats[64][THREAD_URGENCY_MAX];
#endif

#define URGENCY_NOTIFICATION_ASSERT_NS (5 * 1000 * 1000)
uint64_t urgency_notification_assert_abstime_threshold, urgency_notification_max_recorded;

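/*
 * Notifies the power management kext of a change in thread urgency.  When
 * a latency threshold is armed, the callout is timed and the kernel panics
 * if the kext takes longer than the threshold to return.
 */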
void
thread_tell_urgency(int urgency,
                    uint64_t rt_period,
                    uint64_t rt_deadline)
{
    uint64_t  urgency_notification_time_start, delta;
    boolean_t urgency_assert = (urgency_notification_assert_abstime_threshold != 0);
    assert(get_preemption_level() > 0 || ml_get_interrupts_enabled() == FALSE);
#if DEBUG
    urgency_stats[cpu_number() % 64][urgency]++;
#endif
    if (!pmInitDone
        || pmDispatch == NULL
        || pmDispatch->pmThreadTellUrgency == NULL)
        return;

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, (rt_deadline >> 32), rt_deadline, 0);

    if (__improbable((urgency_assert == TRUE)))
        urgency_notification_time_start = mach_absolute_time();

    pmDispatch->pmThreadTellUrgency(urgency, rt_period, rt_deadline);

    if (__improbable((urgency_assert == TRUE))) {
        delta = mach_absolute_time() - urgency_notification_time_start;

        if (__improbable(delta > urgency_notification_max_recorded)) {
            /* This is not synchronized, but it doesn't matter
             * if we (rarely) miss an event, as it is statistically
             * unlikely that it will never recur.
             */
            urgency_notification_max_recorded = delta;

            if (__improbable((delta > urgency_notification_assert_abstime_threshold) && !machine_timeout_suspended()))
                panic("Urgency notification callout %p exceeded threshold, 0x%llx abstime units", pmDispatch->pmThreadTellUrgency, delta);
        }
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, (rt_deadline >> 32), rt_deadline, 0);
}

void
active_rt_threads(boolean_t active)
{
    if (!pmInitDone
        || pmDispatch == NULL
        || pmDispatch->pmActiveRTThreads == NULL)
        return;

    pmDispatch->pmActiveRTThreads(active);
}

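/*
 * Returns the run count saved by machine_run_count() before the power
 * management kext registered.
 */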
static uint32_t
pmGetSavedRunCount(void)
{
    return(saved_run_count);
}

/*
 * Returns the root of the package tree.
 */
static x86_pkg_t *
pmGetPkgRoot(void)
{
    return(x86_pkgs);
}

static boolean_t
pmCPUGetHibernate(int cpu)
{
    return(cpu_datap(cpu)->cpu_hibernate);
}

static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return(cpu_datap(lcpu)->cpu_processor);
}

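/*
 * Resynchronizes the timer deadlines on the given CPU.  If it is not the
 * current CPU, a power management interrupt is sent to it instead.
 */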
static void
pmReSyncDeadlines(int cpu)
{
    static boolean_t registered = FALSE;

    if (!registered) {
        PM_interrupt_register(&etimer_resync_deadlines);
        registered = TRUE;
    }

    if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
        etimer_resync_deadlines();
    else
        cpu_PM_interrupt(cpu);
}

static void
pmSendIPI(int cpu)
{
    lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}

static void
pmGetNanotimeInfo(pm_rtc_nanotime_t *rtc_nanotime)
{
    /*
     * Make sure that nanotime didn't change while we were reading it.
     */
    do {
        rtc_nanotime->generation = pal_rtc_nanotime_info.generation; /* must be first */
        rtc_nanotime->tsc_base = pal_rtc_nanotime_info.tsc_base;
        rtc_nanotime->ns_base = pal_rtc_nanotime_info.ns_base;
        rtc_nanotime->scale = pal_rtc_nanotime_info.scale;
        rtc_nanotime->shift = pal_rtc_nanotime_info.shift;
    } while (pal_rtc_nanotime_info.generation != 0
             && rtc_nanotime->generation != pal_rtc_nanotime_info.generation);
}

static uint32_t
pmTimerQueueMigrate(int target_cpu)
{
    /* Call the etimer code to do this. */
    return (target_cpu != cpu_number())
               ? etimer_queue_migrate(target_cpu)
               : 0;
}

/*
 * Called by the power management kext to register itself and to get the
 * callbacks it might need into other kernel functions.  This interface
 * is versioned to allow for slight mismatches between the kext and the
 * kernel.
 */
void
pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
               pmCallBacks_t *callbacks)
{
    if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
        callbacks->setRTCPop         = setPop;
        callbacks->resyncDeadlines   = pmReSyncDeadlines;
        callbacks->initComplete      = pmInitComplete;
        callbacks->GetLCPU           = pmGetLogicalCPU;
        callbacks->GetCore           = pmGetCore;
        callbacks->GetDie            = pmGetDie;
        callbacks->GetPackage        = pmGetPackage;
        callbacks->GetMyLCPU         = pmGetMyLogicalCPU;
        callbacks->GetMyCore         = pmGetMyCore;
        callbacks->GetMyDie          = pmGetMyDie;
        callbacks->GetMyPackage      = pmGetMyPackage;
        callbacks->GetPkgRoot        = pmGetPkgRoot;
        callbacks->LockCPUTopology   = pmLockCPUTopology;
        callbacks->GetHibernate      = pmCPUGetHibernate;
        callbacks->LCPUtoProcessor   = pmLCPUtoProcessor;
        callbacks->ThreadBind        = thread_bind;
        callbacks->GetSavedRunCount  = pmGetSavedRunCount;
        callbacks->GetNanotimeInfo   = pmGetNanotimeInfo;
        callbacks->ThreadGetUrgency  = pmThreadGetUrgency;
        callbacks->RTCClockAdjust    = rtc_clock_adjust;
        callbacks->timerQueueMigrate = pmTimerQueueMigrate;
        callbacks->topoParms         = &topoParms;
        callbacks->pmSendIPI         = pmSendIPI;
        callbacks->InterruptPending  = lapic_is_interrupt_pending;
        callbacks->IsInterrupting    = lapic_is_interrupting;
        callbacks->InterruptStats    = lapic_interrupt_counts;
        callbacks->DisableApicTimer  = lapic_disable_timer;
    } else {
        panic("Version mis-match between Kernel and CPU PM");
    }

    if (cpuFuncs != NULL) {
        pmDispatch = cpuFuncs;

        if (earlyTopology
            && pmDispatch->pmCPUStateInit != NULL) {
            (*pmDispatch->pmCPUStateInit)();
            earlyTopology = FALSE;
        }

        if (pmDispatch->pmIPIHandler != NULL) {
            lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
        }
    }
}

/*
 * Unregisters the power management functions from the kext.
 */
void
pmUnRegister(pmDispatch_t *cpuFuncs)
{
    if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
        pmDispatch = NULL;
    }
}

/******************************************************************************
 *
 * All of the following are deprecated interfaces and no longer used.
 *
 ******************************************************************************/
kern_return_t
pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
           __unused uint32_t reqsize)
{
    return(KERN_SUCCESS);
}

void
pmsInit(void)
{
}

void
pmsStart(void)
{
}

void
pmsPark(void)
{
}

void
pmsRun(__unused uint32_t nstep)
{
}

kern_return_t
pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
         __unused pmsSetFunc_t *functab,
         __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
{
    return(KERN_SUCCESS);
}

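/*
 * Atomically tracks how many logical CPUs in this CPU's package are idle.
 */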
void
machine_track_platform_idle(boolean_t entry)
{
    cpu_data_t *my_cpu = current_cpu_datap();

    if (entry) {
        (void)__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);
    } else {
        (void)__sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
    }
}