/*
 * Copyright (c) 2004-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */
#include <kern/machine.h>
#include <i386/machine_routines.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/pmap.h>
#include <i386/asm.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock.h>
#include <kern/sched_prim.h>

/*
 * Kernel parameter determining whether threads are halted unconditionally
 * in the idle state. This is the default behavior.
 * See machine_idle() for use.
 */
int idlehalt = 1;

extern int disableConsoleOutput;

decl_simple_lock_data(,pm_init_lock);

/*
 * The following is set when the KEXT loads and initializes.
 */
pmDispatch_t *pmDispatch = NULL;

static uint32_t pmInitDone = 0;

/*
 * Initialize the Cstate change code.
 */
void
power_management_init(void)
{
    static boolean_t initialized = FALSE;

    /*
     * Initialize the lock for the KEXT initialization.
     */
    if (!initialized) {
        simple_lock_init(&pm_init_lock, 0);
        initialized = TRUE;
    }

    if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
        (*pmDispatch->cstateInit)();
}

/*
 * Called when the CPU is idle. It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t *my_cpu = current_cpu_datap();

    if (my_cpu == NULL)
        goto out;

    /*
     * If idlehalt isn't set, then don't do any power management related
     * idle handling.
     */
    if (!idlehalt)
        goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

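    /*
     * The argument below is INT64_MAX (0x7FFFFFFFFFFFFFFF), i.e. the latest
     * possible deadline: effectively "no deadline", leaving the KEXT free
     * to choose an idle state with no bound on when the CPU must wake.
     */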
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->cstateMachineIdle != NULL)
        (*pmDispatch->cstateMachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
        /*
         * If there is no power management, re-enable interrupts and halt.
         * This will keep the CPU from spinning through the scheduler
         * and will allow at least some minimal power savings (but it may
         * cause problems in some MP configurations w.r.t. the APIC
         * stopping during a GV3 transition).
         */
        __asm__ volatile ("sti; hlt");
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
out:
    __asm__ volatile("sti");
}

/*
 * Called when the CPU is to be halted. It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t *cpup = current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("wbinvd; hlt");
        break;

    case PM_HALT_PANIC:
        cpup->lcpu.state = LCPU_PAUSE;
        __asm__ volatile ("cli; wbinvd; hlt");
        break;

    case PM_HALT_NORMAL:
    default:
        __asm__ volatile ("cli");

        if (pmInitDone
            && pmDispatch != NULL
            && pmDispatch->pmCPUHalt != NULL) {
            /*
             * Halt the CPU (and put it in a low power state).
             */
            (*pmDispatch->pmCPUHalt)();

            /*
             * We've exited halt, so get the CPU schedulable again.
             */
            i386_init_slave_fast();

            panic("init_slave_fast returned");
        } else {
            /*
             * If there is no power management and a processor is being
             * taken off-line, invalidate the cache and halt it (it will
             * not be able to be brought back on-line without resetting
             * the CPU).
             */
            __asm__ volatile ("wbinvd");
            cpup->lcpu.state = LCPU_HALT;
            __asm__ volatile ("wbinvd; hlt");

            panic("back from Halt");
        }
        break;
    }
}

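/*
 * Marks all CPUs as off-line in the power management state; simply
 * forwards to the KEXT if it has registered a handler for this.
 */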
void
pmMarkAllCPUsOff(void)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markAllCPUsOff != NULL)
        (*pmDispatch->markAllCPUsOff)();
}

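/*
 * Called by the KEXT (via the initComplete callback registered below) once
 * it has finished initializing; this enables the pmDispatch paths that are
 * guarded by pmInitDone.
 */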
static void
pmInitComplete(void)
{
    pmInitDone = 1;
}

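/*
 * The following topology accessors (logical CPU, core, die, and package)
 * are not called directly within this file; they are handed to the power
 * management KEXT via the callback table filled in by pmKextRegister().
 */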
static x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
    return(cpu_to_lcpu(cpu));
}

static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(&cpup->lcpu);
}

static x86_core_t *
pmGetCore(int cpu)
{
    return(cpu_to_core(cpu));
}

static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.core);
}

static x86_die_t *
pmGetDie(int cpu)
{
    return(cpu_to_die(cpu));
}

static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.die);
}

static x86_pkg_t *
pmGetPackage(int cpu)
{
    return(cpu_to_package(cpu));
}

static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.package);
}

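/*
 * Locks (lock != 0) or unlocks (lock == 0) the x86 CPU topology lock on
 * behalf of the KEXT.
 */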
static void
pmLockCPUTopology(int lock)
{
    if (lock) {
        simple_lock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
    }
}

/*
 * Called to get the next deadline that has been set by the
 * power management code.
 */
uint64_t
pmCPUGetDeadline(cpu_data_t *cpu)
{
    uint64_t deadline = EndOfAllTime;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->GetDeadline != NULL)
        deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);

    return(deadline);
}

/*
 * Called to determine whether the supplied deadline or the power management
 * deadline is sooner. Returns whichever one is first.
 */
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->SetDeadline != NULL)
        deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

    return(deadline);
}

307
308 /*
309 * Called when a power management deadline expires.
310 */
311 void
312 pmCPUDeadline(cpu_data_t *cpu)
313 {
314 if (pmInitDone
315 && pmDispatch != NULL
316 && pmDispatch->Deadline != NULL)
317 (*pmDispatch->Deadline)(&cpu->lcpu);
318 }
319
320 /*
321 * Called to get a CPU out of idle.
322 */
323 boolean_t
324 pmCPUExitIdle(cpu_data_t *cpu)
325 {
326 boolean_t do_ipi;
327
328 if (pmInitDone
329 && pmDispatch != NULL
330 && pmDispatch->exitIdle != NULL)
331 do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
332 else
333 do_ipi = TRUE;
334
335 return(do_ipi);
336 }
337
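/*
 * Called to get a CPU out of halt. Returns KERN_INVALID_ARGUMENT if no
 * KEXT handler is registered to do it.
 */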
kern_return_t
pmCPUExitHalt(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHalt != NULL)
        rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));

    return(rc);
}

/*
 * Called to initialize the power management structures for the CPUs.
 */
void
pmCPUStateInit(void)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
        (*pmDispatch->pmCPUStateInit)();
}

/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t *cpup = current_cpu_datap();

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markCPURunning != NULL)
        (*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
        cpup->lcpu.state = LCPU_RUN;
}

/*
 * Called to get/set CPU power management state.
 */
int
pmCPUControl(uint32_t cmd, void *datap)
{
    int rc = -1;

    if (pmDispatch != NULL
        && pmDispatch->pmCPUControl != NULL)
        rc = (*pmDispatch->pmCPUControl)(cmd, datap);

    return(rc);
}

/*
 * Called to save the timer state used by power management prior
 * to "sleeping".
 */
void
pmTimerSave(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateSave != NULL)
        (*pmDispatch->pmTimerStateSave)();
}

/*
 * Called to restore the timer state used by power management after
 * waking from "sleep".
 */
void
pmTimerRestore(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateRestore != NULL)
        (*pmDispatch->pmTimerStateRestore)();
}

/*
 * Set the worst-case time for the C4 to C2 transition.
 * No longer does anything.
 */
void
ml_set_maxsnoop(__unused uint32_t maxdelay)
{
}

/*
 * Get the worst-case time for the C4 to C2 transition. Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
    uint64_t max_snoop = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxSnoop != NULL)
        max_snoop = pmDispatch->getMaxSnoop();

    return((unsigned)(max_snoop & 0xffffffff));
}

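/*
 * Get the maximum delay time allowed for snoop on the bus; the counterpart
 * of ml_set_maxbusdelay() below.
 */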
uint32_t
ml_get_maxbusdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxBusDelay != NULL)
        max_delay = pmDispatch->getMaxBusDelay();

    return((uint32_t)(max_delay & 0xffffffff));
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
    uint64_t maxdelay = mdelay;

    if (pmDispatch != NULL
        && pmDispatch->setMaxBusDelay != NULL)
        pmDispatch->setMaxBusDelay(maxdelay);
}

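/*
 * Get the maximum delay allowed for an interrupt; the counterpart of
 * ml_set_maxintdelay() below.
 */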
uint64_t
ml_get_maxintdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxIntDelay != NULL)
        max_delay = pmDispatch->getMaxIntDelay();

    return(max_delay);
}

/*
 * Set the maximum delay allowed for an interrupt.
 */
void
ml_set_maxintdelay(uint64_t mdelay)
{
    if (pmDispatch != NULL
        && pmDispatch->setMaxIntDelay != NULL)
        pmDispatch->setMaxIntDelay(mdelay);
}

/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design. This is called per-CPU to place the
 * CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
        && pmDispatch->pmCPUSafeMode != NULL)
        pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
        /*
         * Do something reasonable if the KEXT isn't present.
         *
         * We only look at the PAUSE and RESUME flags. The other flag(s)
         * will not make any sense without the KEXT, so just ignore them.
         *
         * We set the CPU's state to indicate that it's halted. If this
         * is the CPU we're currently running on, then spin until the
         * state becomes non-halted.
         */
        if (flags & PM_SAFE_FL_PAUSE) {
            lcpu->state = LCPU_PAUSE;
            if (lcpu == x86_lcpu()) {
                while (lcpu->state == LCPU_PAUSE)
                    cpu_pause();
            }
        }

        /*
         * Clear the halted flag for the specified CPU; that will get it
         * out of its spin loop.
         */
        if (flags & PM_SAFE_FL_RESUME) {
            lcpu->state = LCPU_RUN;
        }
    }
}

/*
 * Returns the root of the package tree.
 */
static x86_pkg_t *
pmGetPkgRoot(void)
{
    return(x86_pkgs);
}

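/*
 * Returns the hibernate flag for the given CPU.
 */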
static boolean_t
pmCPUGetHibernate(int cpu)
{
    return(cpu_datap(cpu)->cpu_hibernate);
}

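/*
 * Maps a logical CPU number to its processor_t.
 */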
static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return(cpu_datap(lcpu)->cpu_processor);
}

/*
 * Called by the power management kext to register itself and to get the
 * callbacks it might need into other kernel functions. This interface
 * is versioned to allow for slight mismatches between the kext and the
 * kernel.
 */
void
pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
               pmCallBacks_t *callbacks)
{
    if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
        callbacks->setRTCPop       = setPop;
        callbacks->resyncDeadlines = etimer_resync_deadlines;
        callbacks->initComplete    = pmInitComplete;
        callbacks->GetLCPU         = pmGetLogicalCPU;
        callbacks->GetCore         = pmGetCore;
        callbacks->GetDie          = pmGetDie;
        callbacks->GetPackage      = pmGetPackage;
        callbacks->GetMyLCPU       = pmGetMyLogicalCPU;
        callbacks->GetMyCore       = pmGetMyCore;
        callbacks->GetMyDie        = pmGetMyDie;
        callbacks->GetMyPackage    = pmGetMyPackage;
        callbacks->GetPkgRoot      = pmGetPkgRoot;
        callbacks->LockCPUTopology = pmLockCPUTopology;
        callbacks->GetHibernate    = pmCPUGetHibernate;
        callbacks->LCPUtoProcessor = pmLCPUtoProcessor;
        callbacks->ThreadBind      = thread_bind;
        callbacks->topoParms       = &topoParms;
    }

    if (cpuFuncs != NULL) {
        pmDispatch = cpuFuncs;
    }
}

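/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * power management KEXT would typically register itself from its start
 * routine roughly as follows. The dispatch fields named here are the ones
 * this file actually calls; the my* handler names are hypothetical.
 *
 *    static pmDispatch_t pmFuncs = {
 *        .cstateInit        = myCstateInit,
 *        .cstateMachineIdle = myMachineIdle,
 *        .GetDeadline       = myGetDeadline,
 *        .SetDeadline       = mySetDeadline,
 *        .exitIdle          = myExitIdle,
 *        .exitHalt          = myExitHalt,
 *    };
 *    static pmCallBacks_t callbacks;
 *
 *    pmKextRegister(PM_DISPATCH_VERSION, &pmFuncs, &callbacks);
 *    ...
 *    (*callbacks.initComplete)();   // enables the pmInitDone-guarded paths
 */
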
/*
 * Unregisters the power management functions from the kext.
 */
void
pmUnRegister(pmDispatch_t *cpuFuncs)
{
    if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
        pmDispatch = NULL;
    }
}

/******************************************************************************
 *
 * All of the following are deprecated interfaces and no longer used.
 *
 ******************************************************************************/
kern_return_t
pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
           __unused uint32_t reqsize)
{
    return(KERN_SUCCESS);
}

void
pmsInit(void)
{
}

void
pmsStart(void)
{
}

void
pmsPark(void)
{
}

void
pmsRun(__unused uint32_t nstep)
{
}

kern_return_t
pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
         __unused pmsSetFunc_t *functab,
         __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
{
    return(KERN_SUCCESS);
}