/*
 * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */
#include <i386/asm.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock.h>
#include <kern/sched_prim.h>
#include <i386/lapic.h>

extern int disableConsoleOutput;

decl_simple_lock_data(,pm_init_lock);

/*
 * The following is set when the KEXT loads and initializes.
 */
pmDispatch_t *pmDispatch = NULL;

static uint32_t pmInitDone = 0;


/*
 * Initialize the C-state change code.
 */
void
power_management_init(void)
{
    static boolean_t initialized = FALSE;

    /*
     * Initialize the lock for the KEXT initialization.
     */
    if (!initialized) {
        simple_lock_init(&pm_init_lock, 0);
        initialized = TRUE;
    }

    if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
        (*pmDispatch->cstateInit)();
}

/*
 * Called when the CPU is idle. It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t *my_cpu = current_cpu_datap();

    if (my_cpu == NULL)
        goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->MachineIdle != NULL)
        (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
        /*
         * If there is no power management, re-enable interrupts and halt.
         * This will keep the CPU from spinning through the scheduler
         * and will allow at least some minimal power savings (but it
         * may cause problems in some MP configurations w.r.t. the APIC
         * stopping during a GV3 transition).
         *
         * Note that "sti" takes effect only after the following
         * instruction, so an interrupt cannot slip in between the
         * "sti" and the "hlt"; it will instead wake the "hlt".
         */
        __asm__ volatile ("sti; hlt");
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
out:
    __asm__ volatile("sti");
}

/*
 * Called when the CPU is to be halted. It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t *cpup = current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("wbinvd; hlt");
        break;

    case PM_HALT_PANIC:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("cli; wbinvd; hlt");
        break;

    case PM_HALT_NORMAL:
    default:
        __asm__ volatile ("cli");

        if (pmInitDone
            && pmDispatch != NULL
            && pmDispatch->pmCPUHalt != NULL) {
            /*
             * Halt the CPU (and put it in a low power state).
             */
            (*pmDispatch->pmCPUHalt)();

            /*
             * We've exited halt, so make the CPU schedulable again.
             */
            i386_init_slave_fast();

            panic("init_slave_fast returned");
        } else {
            /*
             * If there is no power management and a processor is taken
             * off-line, invalidate the cache and halt it (it will not
             * be able to be brought back on-line without resetting the
             * CPU).
             */
            __asm__ volatile ("wbinvd");
            cpup->lcpu.state = LCPU_HALT;
            __asm__ volatile ("wbinvd; hlt");

            panic("back from Halt");
        }
        break;
    }
}
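
/*
 * Illustrative sketch only (not part of the original source): how a
 * hypothetical caller might pick among the halt reasons handled above.
 * The function and flag variables are invented for the example; the
 * PM_HALT_* reasons are the real ones from the switch.
 */
#if 0
static void
halt_examples(boolean_t in_debugger, boolean_t panicking)
{
    if (in_debugger)
        pmCPUHalt(PM_HALT_DEBUG);   /* flush caches and hlt, no cli */
    else if (panicking)
        pmCPUHalt(PM_HALT_PANIC);   /* cli, then flush caches and hlt */
    else
        pmCPUHalt(PM_HALT_NORMAL);  /* offline path; does not return */
}
#endif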

void
pmMarkAllCPUsOff(void)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markAllCPUsOff != NULL)
        (*pmDispatch->markAllCPUsOff)();
}

static void
pmInitComplete(void)
{
    pmInitDone = 1;
}

static x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
    return(cpu_to_lcpu(cpu));
}

static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(&cpup->lcpu);
}

static x86_core_t *
pmGetCore(int cpu)
{
    return(cpu_to_core(cpu));
}

static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.core);
}

static x86_die_t *
pmGetDie(int cpu)
{
    return(cpu_to_die(cpu));
}

static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.die);
}

static x86_pkg_t *
pmGetPackage(int cpu)
{
    return(cpu_to_package(cpu));
}

static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.package);
}

static void
pmLockCPUTopology(int lock)
{
    if (lock) {
        simple_lock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
    }
}

/*
 * Called to get the next deadline that has been set by the
 * power management code.
 */
uint64_t
pmCPUGetDeadline(cpu_data_t *cpu)
{
    uint64_t deadline = EndOfAllTime;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->GetDeadline != NULL)
        deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);

    return(deadline);
}

/*
 * Called to determine whether the supplied deadline or the power
 * management deadline is sooner. Returns whichever one is first.
 */
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->SetDeadline != NULL)
        deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

    return(deadline);
}
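
/*
 * Illustrative sketch only (not part of the original source): a
 * hypothetical timer path letting the power management kext clip a
 * proposed wakeup time before the hardware is programmed. The helper
 * name program_next_wakeup() is invented; pmCPUSetDeadline() and
 * setPop() are the real routines used here.
 */
#if 0
static void
program_next_wakeup(cpu_data_t *cpu, uint64_t proposed_deadline)
{
    /* Give the PM kext a chance to substitute an earlier deadline. */
    uint64_t deadline = pmCPUSetDeadline(cpu, proposed_deadline);

    /* Arm the RTC "pop" for whichever deadline came back first. */
    setPop(deadline);
}
#endif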

/*
 * Called when a power management deadline expires.
 */
void
pmCPUDeadline(cpu_data_t *cpu)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->Deadline != NULL)
        (*pmDispatch->Deadline)(&cpu->lcpu);
}

/*
 * Called to get a CPU out of idle.
 */
boolean_t
pmCPUExitIdle(cpu_data_t *cpu)
{
    boolean_t do_ipi;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitIdle != NULL)
        do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
    else
        do_ipi = TRUE;

    return(do_ipi);
}

kern_return_t
pmCPUExitHalt(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHalt != NULL)
        rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));

    return(rc);
}

kern_return_t
pmCPUExitHaltToOff(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHaltToOff != NULL)
        rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu));

    return(rc);
}

/*
 * Called to initialize the power management structures for the CPUs.
 */
void
pmCPUStateInit(void)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
        (*pmDispatch->pmCPUStateInit)();
}

/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t *cpup = current_cpu_datap();

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markCPURunning != NULL)
        (*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
        cpup->lcpu.state = LCPU_RUN;
}

/*
 * Called to get/set CPU power management state.
 */
int
pmCPUControl(uint32_t cmd, void *datap)
{
    int rc = -1;

    if (pmDispatch != NULL
        && pmDispatch->pmCPUControl != NULL)
        rc = (*pmDispatch->pmCPUControl)(cmd, datap);

    return(rc);
}

/*
 * Called to save the timer state used by power management prior
 * to "sleeping".
 */
void
pmTimerSave(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateSave != NULL)
        (*pmDispatch->pmTimerStateSave)();
}

/*
 * Called to restore the timer state used by power management after
 * waking from "sleep".
 */
void
pmTimerRestore(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateRestore != NULL)
        (*pmDispatch->pmTimerStateRestore)();
}

/*
 * Set the worst-case time for the C4 to C2 transition.
 * No longer does anything.
 */
void
ml_set_maxsnoop(__unused uint32_t maxdelay)
{
}


/*
 * Get the worst-case time for the C4 to C2 transition. Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
    uint64_t max_snoop = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxSnoop != NULL)
        max_snoop = pmDispatch->getMaxSnoop();

    return((unsigned)(max_snoop & 0xffffffff));
}


uint32_t
ml_get_maxbusdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxBusDelay != NULL)
        max_delay = pmDispatch->getMaxBusDelay();

    return((uint32_t)(max_delay & 0xffffffff));
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
    uint64_t maxdelay = mdelay;

    if (pmDispatch != NULL
        && pmDispatch->setMaxBusDelay != NULL)
        pmDispatch->setMaxBusDelay(maxdelay);
}

uint64_t
ml_get_maxintdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxIntDelay != NULL)
        max_delay = pmDispatch->getMaxIntDelay();

    return(max_delay);
}

/*
 * Set the maximum delay allowed for an interrupt.
 */
void
ml_set_maxintdelay(uint64_t mdelay)
{
    if (pmDispatch != NULL
        && pmDispatch->setMaxIntDelay != NULL)
        pmDispatch->setMaxIntDelay(mdelay);
}

/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design. This is called per-CPU to place the
 * CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
        && pmDispatch->pmCPUSafeMode != NULL)
        pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
        /*
         * Do something reasonable if the KEXT isn't present.
         *
         * We only look at the PAUSE and RESUME flags. The other flag(s)
         * will not make any sense without the KEXT, so just ignore them.
         *
         * We set the CPU's state to indicate that it's halted. If this
         * is the CPU we're currently running on, then spin until the
         * state becomes non-halted.
         */
        if (flags & PM_SAFE_FL_PAUSE) {
            lcpu->state = LCPU_PAUSE;
            if (lcpu == x86_lcpu()) {
                while (lcpu->state == LCPU_PAUSE)
                    cpu_pause();
            }
        }

        /*
         * Clear the halted flag for the specified CPU, which will
         * get it out of its spin loop.
         */
        if (flags & PM_SAFE_FL_RESUME) {
            lcpu->state = LCPU_RUN;
        }
    }
}
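
/*
 * Illustrative sketch only (not part of the original source): how the
 * fallback PAUSE/RESUME protocol above fits together. In the fallback
 * path a CPU only spins when it pauses *itself*; some other CPU later
 * stores LCPU_RUN to release it. Both function names are invented for
 * the example.
 */
#if 0
/* Runs on the CPU that should go quiescent; returns once resumed. */
static void
park_myself(void)
{
    pmSafeMode(x86_lcpu(), PM_SAFE_FL_PAUSE);   /* spins in cpu_pause() */
}

/* Runs on any other CPU to release the parked one. */
static void
release_cpu(x86_lcpu_t *parked)
{
    pmSafeMode(parked, PM_SAFE_FL_RESUME);      /* breaks the spin loop */
}
#endif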

static uint32_t saved_run_count = 0;

void
machine_run_count(uint32_t count)
{
    if (pmDispatch != NULL
        && pmDispatch->pmSetRunCount != NULL)
        pmDispatch->pmSetRunCount(count);
    else
        saved_run_count = count;
}

boolean_t
machine_processor_is_inactive(processor_t processor)
{
    int cpu = processor->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmIsCPUUnAvailable != NULL)
        return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
    else
        return(FALSE);
}

processor_t
machine_choose_processor(processor_set_t pset,
                         processor_t preferred)
{
    int startCPU;
    int endCPU;
    int preferredCPU;
    int chosenCPU;

    if (!pmInitDone)
        return(preferred);

    if (pset == NULL) {
        startCPU = -1;
        endCPU = -1;
    } else {
        startCPU = pset->cpu_set_low;
        endCPU = pset->cpu_set_hi;
    }

    if (preferred == NULL)
        preferredCPU = -1;
    else
        preferredCPU = preferred->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmChooseCPU != NULL) {
        chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);

        if (chosenCPU == -1)
            return(NULL);
        return(cpu_datap(chosenCPU)->cpu_processor);
    }

    return(preferred);
}

static uint32_t
pmGetSavedRunCount(void)
{
    return(saved_run_count);
}

/*
 * Returns the root of the package tree.
 */
static x86_pkg_t *
pmGetPkgRoot(void)
{
    return(x86_pkgs);
}

static boolean_t
pmCPUGetHibernate(int cpu)
{
    return(cpu_datap(cpu)->cpu_hibernate);
}

static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return(cpu_datap(lcpu)->cpu_processor);
}

static void
pmReSyncDeadlines(int cpu)
{
    static boolean_t registered = FALSE;

    if (!registered) {
        PM_interrupt_register(&etimer_resync_deadlines);
        registered = TRUE;
    }

    if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
        etimer_resync_deadlines();
    else
        cpu_PM_interrupt(cpu);
}

static void
pmSendIPI(int cpu)
{
    lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}

static rtc_nanotime_t *
pmGetNanotimeInfo(void)
{
    return(&rtc_nanotime_info);
}

/*
 * Called by the power management kext to register itself and to get the
 * callbacks it might need into other kernel functions. This interface
 * is versioned to allow for slight mismatches between the kext and the
 * kernel.
 */
void
pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
               pmCallBacks_t *callbacks)
{
    if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
        callbacks->setRTCPop        = setPop;
        callbacks->resyncDeadlines  = pmReSyncDeadlines;
        callbacks->initComplete     = pmInitComplete;
        callbacks->GetLCPU          = pmGetLogicalCPU;
        callbacks->GetCore          = pmGetCore;
        callbacks->GetDie           = pmGetDie;
        callbacks->GetPackage       = pmGetPackage;
        callbacks->GetMyLCPU        = pmGetMyLogicalCPU;
        callbacks->GetMyCore        = pmGetMyCore;
        callbacks->GetMyDie         = pmGetMyDie;
        callbacks->GetMyPackage     = pmGetMyPackage;
        callbacks->GetPkgRoot       = pmGetPkgRoot;
        callbacks->LockCPUTopology  = pmLockCPUTopology;
        callbacks->GetHibernate     = pmCPUGetHibernate;
        callbacks->LCPUtoProcessor  = pmLCPUtoProcessor;
        callbacks->ThreadBind       = thread_bind;
        callbacks->GetSavedRunCount = pmGetSavedRunCount;
        callbacks->pmSendIPI        = pmSendIPI;
        callbacks->GetNanotimeInfo  = pmGetNanotimeInfo;
        callbacks->RTCClockAdjust   = rtc_clock_adjust;
        callbacks->topoParms        = &topoParms;
    } else {
        panic("Version mis-match between Kernel and CPU PM");
    }

    if (cpuFuncs != NULL) {
        pmDispatch = cpuFuncs;

        if (pmDispatch->pmIPIHandler != NULL) {
            lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
        }
    }
}
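
/*
 * Illustrative sketch only (not part of the original source): the shape
 * of a kext-side registration call. The my_* names are invented; the
 * dispatch fields shown are ones this file actually consults, and
 * fields left unnamed stay NULL so the wrappers above fall back to
 * their default behavior.
 */
#if 0
static pmDispatch_t my_dispatch = {
    .cstateInit  = my_cstate_init,
    .MachineIdle = my_machine_idle,
    .GetDeadline = my_get_deadline,
    .SetDeadline = my_set_deadline,
};

static pmCallBacks_t callbacks;

static void
my_kext_start(void)
{
    /* The version must match or the kernel panics (see above). */
    pmKextRegister(PM_DISPATCH_VERSION, &my_dispatch, &callbacks);

    /* ... use callbacks.GetPkgRoot(), callbacks.setRTCPop(), etc. ... */

    /* Declare readiness; this sets pmInitDone and enables the
     * pmInitDone-guarded paths in the wrappers above. */
    callbacks.initComplete();
}
#endif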

/*
 * Unregisters the power management functions from the kext.
 */
void
pmUnRegister(pmDispatch_t *cpuFuncs)
{
    if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
        pmDispatch = NULL;
    }
}

/******************************************************************************
 *
 * All of the following are deprecated interfaces and no longer used.
 *
 ******************************************************************************/
kern_return_t
pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
           __unused uint32_t reqsize)
{
    return(KERN_SUCCESS);
}

void
pmsInit(void)
{
}

void
pmsStart(void)
{
}

void
pmsPark(void)
{
}

void
pmsRun(__unused uint32_t nstep)
{
}

kern_return_t
pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
         __unused pmsSetFunc_t *functab,
         __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
{
    return(KERN_SUCCESS);
}