/*
 * Copyright (c) 2004-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */
#include <i386/asm.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock.h>
#include <kern/sched_prim.h>
#include <i386/lapic.h>

/*
 * Kernel parameter determining whether threads are halted unconditionally
 * in the idle state (the default behavior).
 * See machine_idle() for use.
 */
int idlehalt = 1;
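
/*
 * A sketch of how this could be overridden at boot time, assuming the
 * usual boot-arg plumbing (the actual parsing lives in platform
 * initialization, not in this file; names here are illustrative):
 *
 *     uint32_t boot_arg;
 *     if (PE_parse_boot_argn("idlehalt", &boot_arg, sizeof(boot_arg)))
 *         idlehalt = boot_arg;
 */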

extern int disableConsoleOutput;

decl_simple_lock_data(,pm_init_lock);

/*
 * The following is set when the KEXT loads and initializes.
 */
pmDispatch_t *pmDispatch = NULL;

static uint32_t pmInitDone = 0;

/*
 * Initialize the Cstate change code.
 */
void
power_management_init(void)
{
    static boolean_t initialized = FALSE;

    /*
     * Initialize the lock for the KEXT initialization.
     */
    if (!initialized) {
        simple_lock_init(&pm_init_lock, 0);
        initialized = TRUE;
    }

    if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
        (*pmDispatch->cstateInit)();
}

/*
 * Called when the CPU is idle. It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t *my_cpu = current_cpu_datap();

    if (my_cpu == NULL)
        goto out;

    /*
     * If idlehalt isn't set, then don't do any power management related
     * idle handling.
     */
    if (!idlehalt)
        goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->cstateMachineIdle != NULL)
        /* 0x7FFF... is INT64_MAX, i.e. no deadline: idle until an interrupt */
        (*pmDispatch->cstateMachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
        /*
         * If no power management, re-enable interrupts and halt.
         * This will keep the CPU from spinning through the scheduler
         * and will allow at least some minimal power savings (but it
         * can cause problems in some MP configurations w.r.t. the APIC
         * stopping during a GV3 transition).
         */
        __asm__ volatile ("sti; hlt");
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
out:
    __asm__ volatile("sti");
}
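
/*
 * The scheduler's idle loop is the expected caller here, roughly
 * (illustrative sketch, not actual scheduler code):
 *
 *     while (nothing_runnable)
 *         machine_idle();    // returns with interrupts enabled
 */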

/*
 * Called when the CPU is to be halted. It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t *cpup = current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("wbinvd; hlt");
        break;

    case PM_HALT_PANIC:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("cli; wbinvd; hlt");
        break;

    case PM_HALT_NORMAL:
    default:
        __asm__ volatile ("cli");

        if (pmInitDone
            && pmDispatch != NULL
            && pmDispatch->pmCPUHalt != NULL) {
            /*
             * Halt the CPU (and put it in a low power state).
             */
            (*pmDispatch->pmCPUHalt)();

            /*
             * We've exited halt, so get the CPU schedulable again.
             */
            i386_init_slave_fast();

            panic("init_slave_fast returned");
        } else {
            /*
             * If there is no power management and a processor is taken
             * off-line, then invalidate the cache and halt it (it will
             * not be able to be brought back on-line without resetting
             * the CPU).
             */
            __asm__ volatile ("wbinvd");
            cpup->lcpu.state = LCPU_HALT;
            __asm__ volatile ("wbinvd; hlt");

            panic("back from Halt");
        }
        break;
    }
}

void
pmMarkAllCPUsOff(void)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markAllCPUsOff != NULL)
        (*pmDispatch->markAllCPUsOff)();
}

static void
pmInitComplete(void)
{
    pmInitDone = 1;
}

static x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
    return(cpu_to_lcpu(cpu));
}

static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(&cpup->lcpu);
}

static x86_core_t *
pmGetCore(int cpu)
{
    return(cpu_to_core(cpu));
}

static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.core);
}

static x86_die_t *
pmGetDie(int cpu)
{
    return(cpu_to_die(cpu));
}

static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.die);
}

static x86_pkg_t *
pmGetPackage(int cpu)
{
    return(cpu_to_package(cpu));
}

static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.package);
}

static void
pmLockCPUTopology(int lock)
{
    if (lock) {
        simple_lock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
    }
}

/*
 * Called to get the next deadline that has been set by the
 * power management code.
 */
uint64_t
pmCPUGetDeadline(cpu_data_t *cpu)
{
    uint64_t deadline = EndOfAllTime;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->GetDeadline != NULL)
        deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);

    return(deadline);
}

/*
 * Called to determine whether the supplied deadline or the power management
 * deadline is sooner. Returns whichever one is sooner.
 */
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->SetDeadline != NULL)
        deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

    return(deadline);
}
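
/*
 * Illustrative use from the timer layer (hypothetical variable names;
 * the real caller lives in the etimer code): the next timer pop is
 * offered to the PM kext, which may substitute an earlier deadline:
 *
 *     uint64_t pop = next_timer_deadline;
 *     pop = pmCPUSetDeadline(current_cpu_datap(), pop);
 *     setPop(pop);    // program the hardware with the sooner deadline
 */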

/*
 * Called when a power management deadline expires.
 */
void
pmCPUDeadline(cpu_data_t *cpu)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->Deadline != NULL)
        (*pmDispatch->Deadline)(&cpu->lcpu);
}

/*
 * Called to get a CPU out of idle.
 */
boolean_t
pmCPUExitIdle(cpu_data_t *cpu)
{
    boolean_t do_ipi;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitIdle != NULL)
        do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
    else
        do_ipi = TRUE;

    return(do_ipi);
}

/*
 * Called to get a CPU out of the halted state.
 */
kern_return_t
pmCPUExitHalt(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHalt != NULL)
        rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));

    return(rc);
}

/*
 * Called to take a CPU from the halted state to fully off.
 */
kern_return_t
pmCPUExitHaltToOff(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHaltToOff != NULL)
        rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu));

    return(rc);
}

/*
 * Called to initialize the power management structures for the CPUs.
 */
void
pmCPUStateInit(void)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
        (*pmDispatch->pmCPUStateInit)();
}

/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t *cpup = current_cpu_datap();

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markCPURunning != NULL)
        (*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
        cpup->lcpu.state = LCPU_RUN;
}

/*
 * Called to get/set CPU power management state.
 */
int
pmCPUControl(uint32_t cmd, void *datap)
{
    int rc = -1;

    if (pmDispatch != NULL
        && pmDispatch->pmCPUControl != NULL)
        rc = (*pmDispatch->pmCPUControl)(cmd, datap);

    return(rc);
}

/*
 * Called to save the timer state used by power management prior
 * to "sleeping".
 */
void
pmTimerSave(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateSave != NULL)
        (*pmDispatch->pmTimerStateSave)();
}

/*
 * Called to restore the timer state used by power management after
 * waking from "sleep".
 */
void
pmTimerRestore(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateRestore != NULL)
        (*pmDispatch->pmTimerStateRestore)();
}

/*
 * Set the worst-case time for the C4 to C2 transition.
 * No longer does anything.
 */
void
ml_set_maxsnoop(__unused uint32_t maxdelay)
{
}

/*
 * Get the worst-case time for the C4 to C2 transition. Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
    uint64_t max_snoop = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxSnoop != NULL)
        max_snoop = pmDispatch->getMaxSnoop();

    return((unsigned)(max_snoop & 0xffffffff));
}

uint32_t
ml_get_maxbusdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxBusDelay != NULL)
        max_delay = pmDispatch->getMaxBusDelay();

    return((uint32_t)(max_delay & 0xffffffff));
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
    uint64_t maxdelay = mdelay;

    if (pmDispatch != NULL
        && pmDispatch->setMaxBusDelay != NULL)
        pmDispatch->setMaxBusDelay(maxdelay);
}
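
/*
 * Hypothetical driver-side use: a driver with tight DMA latency
 * requirements might cap the bus delay before starting I/O (the value
 * below is illustrative only):
 *
 *     ml_set_maxbusdelay(40 * NSEC_PER_USEC);  // disallow C-states with >40us snoop latency
 */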

uint64_t
ml_get_maxintdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxIntDelay != NULL)
        max_delay = pmDispatch->getMaxIntDelay();

    return(max_delay);
}

/*
 * Set the maximum delay allowed for an interrupt.
 */
void
ml_set_maxintdelay(uint64_t mdelay)
{
    if (pmDispatch != NULL
        && pmDispatch->setMaxIntDelay != NULL)
        pmDispatch->setMaxIntDelay(mdelay);
}

/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding their thermal design limits. This is called per-CPU to place
 * the CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
        && pmDispatch->pmCPUSafeMode != NULL)
        pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
        /*
         * Do something reasonable if the KEXT isn't present.
         *
         * We only look at the PAUSE and RESUME flags. The other flag(s)
         * will not make any sense without the KEXT, so just ignore them.
         *
         * We set the CPU's state to indicate that it's halted. If this
         * is the CPU we're currently running on, then spin until the
         * state becomes non-halted.
         */
        if (flags & PM_SAFE_FL_PAUSE) {
            lcpu->state = LCPU_PAUSE;
            if (lcpu == x86_lcpu()) {
                while (lcpu->state == LCPU_PAUSE)
                    cpu_pause();
            }
        }

        /*
         * Clear the halted flag for the specified CPU; that will
         * get it out of its spin loop.
         */
        if (flags & PM_SAFE_FL_RESUME) {
            lcpu->state = LCPU_RUN;
        }
    }
}
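
/*
 * A hypothetical thermal handler could park a CPU and later release it
 * (sketch only):
 *
 *     pmSafeMode(cpu_to_lcpu(victim_cpu), PM_SAFE_FL_PAUSE);
 *     ...                                // wait for the package to cool
 *     pmSafeMode(cpu_to_lcpu(victim_cpu), PM_SAFE_FL_RESUME);
 *
 * Note that the RESUME must come from another CPU: when the KEXT is
 * absent, the paused CPU is spinning in the loop above.
 */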

static uint32_t saved_run_count = 0;

void
machine_run_count(uint32_t count)
{
    if (pmDispatch != NULL
        && pmDispatch->pmSetRunCount != NULL)
        pmDispatch->pmSetRunCount(count);
    else
        saved_run_count = count;
}

boolean_t
machine_cpu_is_inactive(int cpu)
{
    if (pmDispatch != NULL
        && pmDispatch->pmIsCPUUnAvailable != NULL)
        return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
    else
        return(FALSE);
}

static uint32_t
pmGetSavedRunCount(void)
{
    return(saved_run_count);
}

/*
 * Returns the root of the package tree.
 */
static x86_pkg_t *
pmGetPkgRoot(void)
{
    return(x86_pkgs);
}

static boolean_t
pmCPUGetHibernate(int cpu)
{
    return(cpu_datap(cpu)->cpu_hibernate);
}

static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return(cpu_datap(lcpu)->cpu_processor);
}

static void
pmReSyncDeadlines(int cpu)
{
    static boolean_t registered = FALSE;

    if (!registered) {
        PM_interrupt_register(&etimer_resync_deadlines);
        registered = TRUE;
    }

    if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
        etimer_resync_deadlines();
    else
        cpu_PM_interrupt(cpu);
}

static void
pmSendIPI(int cpu)
{
    lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}

/*
 * Called by the power management kext to register itself and to get the
 * callbacks it might need into other kernel functions. This interface
 * is versioned to allow for slight mismatches between the kext and the
 * kernel.
 */
void
pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
               pmCallBacks_t *callbacks)
{
    if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
        callbacks->setRTCPop = setPop;
        callbacks->resyncDeadlines = pmReSyncDeadlines;
        callbacks->initComplete = pmInitComplete;
        callbacks->GetLCPU = pmGetLogicalCPU;
        callbacks->GetCore = pmGetCore;
        callbacks->GetDie = pmGetDie;
        callbacks->GetPackage = pmGetPackage;
        callbacks->GetMyLCPU = pmGetMyLogicalCPU;
        callbacks->GetMyCore = pmGetMyCore;
        callbacks->GetMyDie = pmGetMyDie;
        callbacks->GetMyPackage = pmGetMyPackage;
        callbacks->GetPkgRoot = pmGetPkgRoot;
        callbacks->LockCPUTopology = pmLockCPUTopology;
        callbacks->GetHibernate = pmCPUGetHibernate;
        callbacks->LCPUtoProcessor = pmLCPUtoProcessor;
        callbacks->ThreadBind = thread_bind;
        callbacks->GetSavedRunCount = pmGetSavedRunCount;
        callbacks->pmSendIPI = pmSendIPI;
        callbacks->topoParms = &topoParms;
    } else {
        panic("Version mis-match between Kernel and CPU PM");
    }

    if (cpuFuncs != NULL) {
        pmDispatch = cpuFuncs;

        if (pmDispatch->pmIPIHandler != NULL) {
            lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
        }
    }
}
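
/*
 * From the kext side, registration would look roughly like this (a
 * sketch with hypothetical handler names; the dispatch table is filled
 * with the kext's own implementations):
 *
 *     static pmDispatch_t myDispatch = {
 *         .cstateInit        = my_cstate_init,
 *         .cstateMachineIdle = my_machine_idle,
 *         .SetDeadline       = my_set_deadline,
 *         // ... remaining handlers as needed; NULL entries fall back
 *         // to the kernel defaults seen in the wrappers above.
 *     };
 *
 *     pmCallBacks_t callbacks;
 *     pmKextRegister(PM_DISPATCH_VERSION, &myDispatch, &callbacks);
 */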

/*
 * Unregisters the power management functions from the kext.
 */
void
pmUnRegister(pmDispatch_t *cpuFuncs)
{
    if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
        pmDispatch = NULL;
    }
}

/******************************************************************************
 *
 * All of the following are deprecated interfaces and no longer used.
 *
 ******************************************************************************/
kern_return_t
pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
           __unused uint32_t reqsize)
{
    return(KERN_SUCCESS);
}

void
pmsInit(void)
{
}

void
pmsStart(void)
{
}

void
pmsPark(void)
{
}

void
pmsRun(__unused uint32_t nstep)
{
}

kern_return_t
pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
         __unused pmsSetFunc_t *functab,
         __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
{
    return(KERN_SUCCESS);
}