/*
 * Copyright (c) 2004-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */
#include <i386/asm.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock_protos.h>
#include <kern/sched_prim.h>
#include <i386/lapic.h>
#include <i386/pal_routines.h>

#include <sys/kdebug.h>

extern int disableConsoleOutput;

#define DELAY_UNSET 0xFFFFFFFFFFFFFFFFULL

/*
 * The following is set when the KEXT loads and initializes.
 */
pmDispatch_t *pmDispatch = NULL;

static uint32_t  pmInitDone        = 0;
static boolean_t earlyTopology     = FALSE;
static uint64_t  earlyMaxBusDelay  = DELAY_UNSET;
static uint64_t  earlyMaxIntDelay  = DELAY_UNSET;
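
/*
 * Note on the pattern used throughout this file: every wrapper checks
 * that the KEXT has registered (pmDispatch != NULL) and has supplied
 * the particular entry point before dispatching through it, falling
 * back to a safe default otherwise, so the kernel stays functional
 * before the KEXT loads.  A sketch of the recurring idiom (someFunc is
 * illustrative, not a real member):
 *
 *	if (pmInitDone
 *	    && pmDispatch != NULL
 *	    && pmDispatch->someFunc != NULL)
 *	        (*pmDispatch->someFunc)(...);
 *	else
 *	        ... safe default ...
 */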

/*
 * Initialize the C-state change code.
 */
void
power_management_init(void)
{
    if (pmDispatch != NULL && pmDispatch->cstateInit != NULL)
        (*pmDispatch->cstateInit)();
}

/*
 * Called when the CPU is idle.  It calls into the power management KEXT
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t *my_cpu = current_cpu_datap();

    if (my_cpu == NULL)
        goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone) {
        /*
         * Handle the case where ml_set_maxbusdelay() or
         * ml_set_maxintdelay() was called before the CPU PM KEXT was
         * registered.  We apply the saved values here because idle is
         * the point at which decisions based on them are made.
         */
        if (earlyMaxBusDelay != DELAY_UNSET)
            ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));

        if (earlyMaxIntDelay != DELAY_UNSET)
            ml_set_maxintdelay(earlyMaxIntDelay);
    }

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->MachineIdle != NULL)
            (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
        /*
         * If there is no power management, re-enable interrupts and
         * halt.  This will keep the CPU from spinning through the
         * scheduler and will allow at least some minimal power savings
         * (but it may cause problems in some MP configurations with
         * respect to the APIC stopping during a GV3 transition).
         */
        pal_hlt();

        /* Once woken, re-disable interrupts. */
        pal_cli();
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
out:
    pal_sti();
}

/*
 * Called when the CPU is to be halted.  It will choose the best C-state
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t *cpup = current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
        cpup->lcpu.state = LCPU_PAUSE;
        pal_stop_cpu(FALSE);
        break;

    case PM_HALT_PANIC:
        cpup->lcpu.state = LCPU_PAUSE;
        pal_stop_cpu(TRUE);
        break;

    case PM_HALT_NORMAL:
    default:
        pal_cli();

        if (pmInitDone
            && pmDispatch != NULL
            && pmDispatch->pmCPUHalt != NULL) {
            /*
             * Halt the CPU (and put it in a low-power state).
             */
            (*pmDispatch->pmCPUHalt)();

            /*
             * We've exited halt, so make the CPU schedulable again.
             */
            i386_init_slave_fast();

            panic("init_slave_fast returned");
        } else {
            /*
             * If there is no power management and a processor is taken
             * off-line, invalidate the cache and halt it (it will not
             * be able to be brought back on-line without resetting the
             * CPU).
             */
            __asm__ volatile ("wbinvd");
            cpup->lcpu.state = LCPU_HALT;
            pal_stop_cpu(FALSE);

            panic("back from Halt");
        }

        break;
    }
}

/*
 * Pass the "all CPUs are now off-line" notification through to the
 * KEXT, if it has registered a handler for it.
 */
void
pmMarkAllCPUsOff(void)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markAllCPUsOff != NULL)
            (*pmDispatch->markAllCPUsOff)();
}

/*
 * Called back by the KEXT (via the initComplete callback registered in
 * pmKextRegister() below) once it has finished initializing.
 */
static void
pmInitComplete(void)
{
    if (earlyTopology
        && pmDispatch != NULL
        && pmDispatch->pmCPUStateInit != NULL) {
        (*pmDispatch->pmCPUStateInit)();
        earlyTopology = FALSE;
    }

    pmInitDone = 1;
}

/*
 * Accessors for navigating the CPU topology; these are handed to the
 * KEXT through the callback table set up in pmKextRegister().
 */
static x86_lcpu_t *
pmGetLogicalCPU(int cpu)
{
    return(cpu_to_lcpu(cpu));
}

static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(&cpup->lcpu);
}

static x86_core_t *
pmGetCore(int cpu)
{
    return(cpu_to_core(cpu));
}

static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.core);
}

static x86_die_t *
pmGetDie(int cpu)
{
    return(cpu_to_die(cpu));
}

static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.die);
}

static x86_pkg_t *
pmGetPackage(int cpu)
{
    return(cpu_to_package(cpu));
}

static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t *cpup = current_cpu_datap();

    return(cpup->lcpu.package);
}

static void
pmLockCPUTopology(int lock)
{
    if (lock) {
        simple_lock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
    }
}

/*
 * Called to get the next deadline that has been set by the
 * power management code.
 * Note: a return of 0 from AICPM (the AppleIntelCPUPowerManagement
 * KEXT) and from this routine signifies that no deadline is set.
 */
uint64_t
pmCPUGetDeadline(cpu_data_t *cpu)
{
    uint64_t deadline = 0;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->GetDeadline != NULL)
            deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu);

    return(deadline);
}

/*
 * Called to determine if the supplied deadline or the power management
 * deadline is sooner.  Returns whichever one is first.
 */
uint64_t
pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->SetDeadline != NULL)
            deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline);

    return(deadline);
}
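
/*
 * Illustrative interplay (a sketch, not the actual timer code, which
 * lives elsewhere): when the timer layer programs its next wakeup it
 * can offer the deadline to the KEXT, which may substitute a sooner
 * one, e.g. so a deep C-state is exited in time:
 *
 *	uint64_t deadline = next_expiry;   // next_expiry is hypothetical
 *	deadline = pmCPUSetDeadline(current_cpu_datap(), deadline);
 *	// ... program the hardware timer with the (possibly earlier)
 *	// result ...
 */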

/*
 * Called when a power management deadline expires.
 */
void
pmCPUDeadline(cpu_data_t *cpu)
{
    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->Deadline != NULL)
            (*pmDispatch->Deadline)(&cpu->lcpu);
}

/*
 * Called to get a CPU out of idle.
 */
boolean_t
pmCPUExitIdle(cpu_data_t *cpu)
{
    boolean_t do_ipi;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitIdle != NULL)
            do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu);
    else
        do_ipi = TRUE;

    return(do_ipi);
}

kern_return_t
pmCPUExitHalt(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHalt != NULL)
            rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu));

    return(rc);
}

kern_return_t
pmCPUExitHaltToOff(int cpu)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->exitHaltToOff != NULL)
            rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu));

    return(rc);
}

/*
 * Called to initialize the power management structures for the CPUs.
 */
void
pmCPUStateInit(void)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL)
        (*pmDispatch->pmCPUStateInit)();
    else
        earlyTopology = TRUE;
}

/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t *cpup = current_cpu_datap();

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->markCPURunning != NULL)
            (*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
        cpup->lcpu.state = LCPU_RUN;
}

/*
 * Called to get/set CPU power management state.
 */
int
pmCPUControl(uint32_t cmd, void *datap)
{
    int rc = -1;

    if (pmDispatch != NULL
        && pmDispatch->pmCPUControl != NULL)
            rc = (*pmDispatch->pmCPUControl)(cmd, datap);

    return(rc);
}

/*
 * Called to save the timer state used by power management prior
 * to "sleeping".
 */
void
pmTimerSave(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateSave != NULL)
            (*pmDispatch->pmTimerStateSave)();
}

/*
 * Called to restore the timer state used by power management after
 * waking from "sleep".
 */
void
pmTimerRestore(void)
{
    if (pmDispatch != NULL
        && pmDispatch->pmTimerStateRestore != NULL)
            (*pmDispatch->pmTimerStateRestore)();
}

/*
 * Set the worst-case time for the C4-to-C2 transition.
 * No longer does anything.
 */
void
ml_set_maxsnoop(__unused uint32_t maxdelay)
{
}

/*
 * Get the worst-case time for the C4-to-C2 transition.
 * Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
    uint64_t max_snoop = 0;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->getMaxSnoop != NULL)
            max_snoop = pmDispatch->getMaxSnoop();

    return((unsigned)(max_snoop & 0xffffffff));
}

uint32_t
ml_get_maxbusdelay(void)
{
    uint64_t max_delay = 0;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->getMaxBusDelay != NULL)
            max_delay = pmDispatch->getMaxBusDelay();

    return((uint32_t)(max_delay & 0xffffffff));
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it
 * takes to transition from a non-snooping power state (C4) to a
 * snooping state (C2).  If maxBusDelay is less than C4C2SnoopDelay, we
 * will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
    uint64_t maxdelay = mdelay;

    if (pmDispatch != NULL
        && pmDispatch->setMaxBusDelay != NULL) {
        earlyMaxBusDelay = DELAY_UNSET;
        pmDispatch->setMaxBusDelay(maxdelay);
    } else
        earlyMaxBusDelay = maxdelay;
}
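
/*
 * Design note: if the KEXT has not yet registered setMaxBusDelay, the
 * requested value is parked in earlyMaxBusDelay and replayed from
 * machine_idle() once pmInitDone is set, so callers may safely impose
 * a bus-delay limit during early boot.  A hypothetical caller:
 *
 *	ml_set_maxbusdelay(40);	   // the value is illustrative only
 */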

uint64_t
ml_get_maxintdelay(void)
{
    uint64_t max_delay = 0;

    if (pmDispatch != NULL
        && pmDispatch->getMaxIntDelay != NULL)
            max_delay = pmDispatch->getMaxIntDelay();

    return(max_delay);
}

/*
 * Set the maximum delay allowed for an interrupt.
 */
void
ml_set_maxintdelay(uint64_t mdelay)
{
    if (pmDispatch != NULL
        && pmDispatch->setMaxIntDelay != NULL) {
        earlyMaxIntDelay = DELAY_UNSET;
        pmDispatch->setMaxIntDelay(mdelay);
    } else
        earlyMaxIntDelay = mdelay;
}

boolean_t
ml_get_interrupt_prewake_applicable(void)
{
    boolean_t applicable = FALSE;

    if (pmInitDone
        && pmDispatch != NULL
        && pmDispatch->pmInterruptPrewakeApplicable != NULL)
            applicable = pmDispatch->pmInterruptPrewakeApplicable();

    return applicable;
}

/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design.  This is called per-CPU to place the
 * CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL
        && pmDispatch->pmCPUSafeMode != NULL)
            pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
        /*
         * Do something reasonable if the KEXT isn't present.
         *
         * We only look at the PAUSE and RESUME flags.  The other
         * flag(s) will not make any sense without the KEXT, so just
         * ignore them.
         *
         * We set the CPU's state to indicate that it's halted.  If
         * this is the CPU we're currently running on, then spin until
         * the state becomes non-halted.
         */
        if (flags & PM_SAFE_FL_PAUSE) {
            lcpu->state = LCPU_PAUSE;
            if (lcpu == x86_lcpu()) {
                while (lcpu->state == LCPU_PAUSE)
                    cpu_pause();
            }
        }

        /*
         * Clear the halted flag for the specified CPU, which will get
         * it out of its spin loop.
         */
        if (flags & PM_SAFE_FL_RESUME) {
            lcpu->state = LCPU_RUN;
        }
    }
}
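
/*
 * Illustrative use (the caller is hypothetical): a thermal or power
 * handler might park a logical CPU and later release it with:
 *
 *	pmSafeMode(lcpu, PM_SAFE_FL_PAUSE);
 *	... later, from another CPU ...
 *	pmSafeMode(lcpu, PM_SAFE_FL_RESUME);
 *
 * Note that the resume must come from another CPU when the paused CPU
 * is spinning in the fallback path above.
 */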

static uint32_t saved_run_count = 0;

void
machine_run_count(uint32_t count)
{
    if (pmDispatch != NULL
        && pmDispatch->pmSetRunCount != NULL)
            pmDispatch->pmSetRunCount(count);
    else
        saved_run_count = count;
}

boolean_t
machine_processor_is_inactive(processor_t processor)
{
    int cpu = processor->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmIsCPUUnAvailable != NULL)
            return(pmDispatch->pmIsCPUUnAvailable(cpu_to_lcpu(cpu)));
    else
        return(FALSE);
}

/*
 * Ask the KEXT to choose a processor to run on.  A CPU number of -1
 * stands for "no constraint" on input and "none available" on output.
 */
processor_t
machine_choose_processor(processor_set_t pset,
                         processor_t preferred)
{
    int startCPU;
    int endCPU;
    int preferredCPU;
    int chosenCPU;

    if (!pmInitDone)
        return(preferred);

    if (pset == NULL) {
        startCPU = -1;
        endCPU = -1;
    } else {
        startCPU = pset->cpu_set_low;
        endCPU = pset->cpu_set_hi;
    }

    if (preferred == NULL)
        preferredCPU = -1;
    else
        preferredCPU = preferred->cpu_id;

    if (pmDispatch != NULL
        && pmDispatch->pmChooseCPU != NULL) {
        chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);

        if (chosenCPU == -1)
            return(NULL);
        return(cpu_datap(chosenCPU)->cpu_processor);
    }

    return(preferred);
}

static int
pmThreadGetUrgency(uint64_t *rt_period, uint64_t *rt_deadline)
{
    return(thread_get_urgency(rt_period, rt_deadline));
}

#if DEBUG
uint32_t urgency_stats[64][THREAD_URGENCY_MAX];
#endif

#define URGENCY_NOTIFICATION_ASSERT_NS (5 * 1000 * 1000)
uint64_t urgency_notification_assert_abstime_threshold, urgency_notification_max_recorded;
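
/*
 * URGENCY_NOTIFICATION_ASSERT_NS is 5 ms expressed in nanoseconds.  The
 * abstime threshold above is presumably derived from it at init time
 * (e.g. via nanoseconds_to_absolutetime(); the conversion happens
 * outside this file, so treat that as an assumption).  While the
 * threshold remains 0, the latency check in thread_tell_urgency()
 * below stays disabled.
 */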

void
thread_tell_urgency(int urgency,
                    uint64_t rt_period,
                    uint64_t rt_deadline)
{
    uint64_t  urgency_notification_time_start, delta;
    boolean_t urgency_assert = (urgency_notification_assert_abstime_threshold != 0);

    assert(get_preemption_level() > 0 || ml_get_interrupts_enabled() == FALSE);
#if DEBUG
    urgency_stats[cpu_number() % 64][urgency]++;
#endif
    if (!pmInitDone
        || pmDispatch == NULL
        || pmDispatch->pmThreadTellUrgency == NULL)
            return;

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, (rt_deadline >> 32), rt_deadline, 0);

    if (__improbable((urgency_assert == TRUE)))
        urgency_notification_time_start = mach_absolute_time();

    pmDispatch->pmThreadTellUrgency(urgency, rt_period, rt_deadline);

    if (__improbable((urgency_assert == TRUE))) {
        delta = mach_absolute_time() - urgency_notification_time_start;

        if (__improbable(delta > urgency_notification_max_recorded)) {
            /*
             * This is not synchronized, but it doesn't matter
             * if we (rarely) miss an event, as it is statistically
             * unlikely that it will never recur.
             */
            urgency_notification_max_recorded = delta;

            if (__improbable((delta > urgency_notification_assert_abstime_threshold) && !machine_timeout_suspended()))
                panic("Urgency notification callout %p exceeded threshold, 0x%llx abstime units", pmDispatch->pmThreadTellUrgency, delta);
        }
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, (rt_deadline >> 32), rt_deadline, 0);
}

void
active_rt_threads(boolean_t active)
{
    if (!pmInitDone
        || pmDispatch == NULL
        || pmDispatch->pmActiveRTThreads == NULL)
            return;

    pmDispatch->pmActiveRTThreads(active);
}

static uint32_t
pmGetSavedRunCount(void)
{
    return(saved_run_count);
}

/*
 * Returns the root of the package tree.
 */
static x86_pkg_t *
pmGetPkgRoot(void)
{
    return(x86_pkgs);
}

static boolean_t
pmCPUGetHibernate(int cpu)
{
    return(cpu_datap(cpu)->cpu_hibernate);
}

static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return(cpu_datap(lcpu)->cpu_processor);
}

/*
 * On first use, register etimer_resync_deadlines() as the PM interrupt
 * handler.  Resync locally if the target is this CPU; otherwise poke
 * the target with a PM interrupt.
 */
static void
pmReSyncDeadlines(int cpu)
{
    static boolean_t registered = FALSE;

    if (!registered) {
        PM_interrupt_register(&etimer_resync_deadlines);
        registered = TRUE;
    }

    if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
        etimer_resync_deadlines();
    else
        cpu_PM_interrupt(cpu);
}

static void
pmSendIPI(int cpu)
{
    lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}

static void
pmGetNanotimeInfo(pm_rtc_nanotime_t *rtc_nanotime)
{
    /*
     * Make sure that nanotime didn't change while we were reading it.
     */
    do {
        rtc_nanotime->generation = pal_rtc_nanotime_info.generation; /* must be first */
        rtc_nanotime->tsc_base = pal_rtc_nanotime_info.tsc_base;
        rtc_nanotime->ns_base = pal_rtc_nanotime_info.ns_base;
        rtc_nanotime->scale = pal_rtc_nanotime_info.scale;
        rtc_nanotime->shift = pal_rtc_nanotime_info.shift;
    } while (pal_rtc_nanotime_info.generation != 0
             && rtc_nanotime->generation != pal_rtc_nanotime_info.generation);
}
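
/*
 * Design note on pmGetNanotimeInfo(): this is a lock-free,
 * seqlock-style read.  By convention the writer of
 * pal_rtc_nanotime_info zeroes `generation' while an update is in
 * flight and bumps it to a new non-zero value afterward (that code
 * lives in the rtclock/PAL layer, not here), so the loop retries
 * whenever it observes a zero or changed generation.
 */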

static uint32_t
pmTimerQueueMigrate(int target_cpu)
{
    /* Call the etimer code to do this. */
    return (target_cpu != cpu_number())
        ? etimer_queue_migrate(target_cpu)
        : 0;
}

/*
 * Called by the power management KEXT to register itself and to get the
 * callbacks it might need into other kernel functions.  This interface
 * is versioned to allow for slight mismatches between the KEXT and the
 * kernel.
 */
void
pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs,
               pmCallBacks_t *callbacks)
{
    if (callbacks != NULL && version == PM_DISPATCH_VERSION) {
        callbacks->setRTCPop         = setPop;
        callbacks->resyncDeadlines   = pmReSyncDeadlines;
        callbacks->initComplete      = pmInitComplete;
        callbacks->GetLCPU           = pmGetLogicalCPU;
        callbacks->GetCore           = pmGetCore;
        callbacks->GetDie            = pmGetDie;
        callbacks->GetPackage        = pmGetPackage;
        callbacks->GetMyLCPU         = pmGetMyLogicalCPU;
        callbacks->GetMyCore         = pmGetMyCore;
        callbacks->GetMyDie          = pmGetMyDie;
        callbacks->GetMyPackage      = pmGetMyPackage;
        callbacks->GetPkgRoot        = pmGetPkgRoot;
        callbacks->LockCPUTopology   = pmLockCPUTopology;
        callbacks->GetHibernate      = pmCPUGetHibernate;
        callbacks->LCPUtoProcessor   = pmLCPUtoProcessor;
        callbacks->ThreadBind        = thread_bind;
        callbacks->GetSavedRunCount  = pmGetSavedRunCount;
        callbacks->GetNanotimeInfo   = pmGetNanotimeInfo;
        callbacks->ThreadGetUrgency  = pmThreadGetUrgency;
        callbacks->RTCClockAdjust    = rtc_clock_adjust;
        callbacks->timerQueueMigrate = pmTimerQueueMigrate;
        callbacks->topoParms         = &topoParms;
        callbacks->pmSendIPI         = pmSendIPI;
        callbacks->InterruptPending  = lapic_is_interrupt_pending;
        callbacks->IsInterrupting    = lapic_is_interrupting;
        callbacks->InterruptStats    = lapic_interrupt_counts;
        callbacks->DisableApicTimer  = lapic_disable_timer;
    } else {
        panic("Version mis-match between Kernel and CPU PM");
    }

    if (cpuFuncs != NULL) {
        pmDispatch = cpuFuncs;

        if (earlyTopology
            && pmDispatch->pmCPUStateInit != NULL) {
            (*pmDispatch->pmCPUStateInit)();
            earlyTopology = FALSE;
        }

        if (pmDispatch->pmIPIHandler != NULL) {
            lapic_set_pm_func((i386_intr_func_t)pmDispatch->pmIPIHandler);
        }
    }
}
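
/*
 * Illustrative registration sketch (hypothetical KEXT-side code; only
 * pmKextRegister(), PM_DISPATCH_VERSION, pmDispatch_t and pmCallBacks_t
 * come from this interface, the my* names are invented):
 *
 *	static pmDispatch_t  myDispatch = {
 *	    .MachineIdle = my_machine_idle,    // KEXT idle handler
 *	    .GetDeadline = my_get_deadline,    // KEXT deadline query
 *	    // ... any entry left NULL keeps the kernel fallback ...
 *	};
 *	static pmCallBacks_t myCallbacks;
 *
 *	pmKextRegister(PM_DISPATCH_VERSION, &myDispatch, &myCallbacks);
 *
 * On return, myCallbacks holds the kernel entry points (setRTCPop,
 * resyncDeadlines, the topology accessors, ...) that the KEXT may use.
 * Note that a version mismatch panics, so the KEXT must be built
 * against the same PM_DISPATCH_VERSION as the kernel.
 */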

/*
 * Unregisters the power management functions from the KEXT.
 */
void
pmUnRegister(pmDispatch_t *cpuFuncs)
{
    if (cpuFuncs != NULL && pmDispatch == cpuFuncs) {
        pmDispatch = NULL;
    }
}

/******************************************************************************
 *
 * All of the following are deprecated interfaces and no longer used.
 *
 ******************************************************************************/
kern_return_t
pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr,
           __unused uint32_t reqsize)
{
    return(KERN_SUCCESS);
}

void
pmsInit(void)
{
}

void
pmsStart(void)
{
}

void
pmsPark(void)
{
}

void
pmsRun(__unused uint32_t nstep)
{
}

kern_return_t
pmsBuild(__unused pmsDef *pd, __unused uint32_t pdsize,
         __unused pmsSetFunc_t *functab,
         __unused uint32_t platformData, __unused pmsQueryFunc_t queryFunc)
{
    return(KERN_SUCCESS);
}