osfmk/i386/acpi.c (apple/xnu, xnu-6153.121.1)
/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if HYPERVISOR
#include <kern/hv_support.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

#include <i386/tsc.h>

#include <kern/cpu_data.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if CONFIG_SLEEP
extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
extern void acpi_wake_prot(void);
#endif
extern kern_return_t IOCPURunPlatformQuiesceActions(void);
extern kern_return_t IOCPURunPlatformActiveActions(void);
extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);

extern void fpinit(void);

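/*
 * Install the protected-mode wake entry point (acpi_wake_prot) into the
 * low-memory real-mode bootstrap page and return the bootstrap offset;
 * returns 0 when sleep support is compiled out.
 */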
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}

#if CONFIG_SLEEP

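/*
 * save_kdebug_enable preserves the kdebug tracing state across the trace
 * blackout during sleep/deep idle. The abstime globals bracket sleep and
 * deep idle so that the TSC-based nanotime can be rebased on wake;
 * deep_idle_rebase selects whether that rebase happens for deep idle
 * (cf. the tsc.deep_idle_rebase sysctl referenced below).
 */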
unsigned int save_kdebug_enable = 0;
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
boolean_t deep_idle_rebase = TRUE;

#if HIBERNATION
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;
	void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
	    (acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate) {
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt) {
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPEHaltCPU);
			}
		} else if (mode == kIOHibernatePostWriteRestart) {
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPERestartCPU);
			}
		} else {
			// sleep
			HIBLOG("sleep\n");

			// in case we come back via regular wake, clear the hibernate state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}
	}

#if CONFIG_VMX
	vmx_suspend();
#endif
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
#endif /* HIBERNATION */
#endif /* CONFIG_SLEEP */

extern void slave_pstart(void);
extern void hibernate_rebuild_vm_structs(void);

extern unsigned int wake_nkdbufs;
extern unsigned int trace_wrap;

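/*
 * Full sleep/hibernate counterpart of acpi_idle_kernel below: takes the
 * remaining CPUs to the "off" state, saves boot-CPU state, calls the
 * platform sleep (or hibernate) callback, and on wake reinitializes
 * per-CPU hardware state before returning to the caller.
 */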
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	cpu_data_t *cdp = current_cpu_datap();
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	my_cpu = cpu_number();
	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate,
	    my_cpu);

	/* Get all CPUs to be in the "off" state */
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu) {
			continue;
		}
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS) {
			panic("Error %d trying to transition CPU %d to OFF",
			    rc, cpu);
		}
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown();

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

#if MONOTONIC
	mt_cpu_down(cdp);
#endif /* MONOTONIC */

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);

	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Does not return until the platform is woken up or sleep fails.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
	vmx_suspend();
#endif
	acpi_sleep_cpu(func, refcon);
#endif

	acpi_wake_abstime = mach_absolute_time();
	/* Rebase TSC->absolute time conversion, using timestamp
	 * recorded before sleep.
	 */
	rtc_nanotime_init(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = start = mach_absolute_time();
	assert(start >= acpi_sleep_abstime);

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

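	/*
	 * Wake path: reinitialize the per-CPU hardware state that sleep and
	 * firmware have clobbered (serial console, fast syscall MSRs, MCA,
	 * MTRR/PAT, microcode, VMX, local APIC) before restarting timers.
	 */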
	/* Reset UART if kprintf is enabled.
	 * However, kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (FALSE == disable_serial_output) {
		pal_serial_init();
	}

#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;
	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable fast syscall */
	cpu_syscall_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode */
	ucode_update_wake();

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume(did_hibernate);
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe()) {
		lapic_configure();
	}

#if KASAN
	/*
	 * The sleep implementation uses indirect noreturn calls, so we miss stack
	 * unpoisoning. Do it explicitly.
	 */
	kasan_unpoison_curstack(true);
#endif

#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif

	elapsed += mach_absolute_time() - start;

	rtc_decrementer_configure();
	kdebug_enable = save_kdebug_enable;

	if (kdebug_enable == 0) {
		if (wake_nkdbufs) {
			start = mach_absolute_time();
			kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE);
			elapsed_trace_start += mach_absolute_time() - start;
		}
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();

	IOCPURunPlatformActiveActions();

#if HIBERNATION
	if (did_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);

		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
	    elapsed_trace_start, acpi_wake_abstime);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();


#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */

#if HIBERNATION
	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif /* HIBERNATION */

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif /* CONFIG_SLEEP */
}

/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
 * processors are expected already to have been offlined in the deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
	    cpu_number(), istate ? "enabled" : "disabled");

	assert(cpu_number() == master_cpu);

#if MONOTONIC
	mt_cpu_down(cpu_datap(0));
#endif /* MONOTONIC */

	/* Cancel any pending deadline */
	setPop(0);
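	/*
	 * Spin, briefly re-enabling interrupts, until any already-pending
	 * local APIC timer (and, with MONOTONIC, performance-counter)
	 * interrupts have been taken and dismissed.
	 */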
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)
#if MONOTONIC
	    || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT))
#endif /* MONOTONIC */
	    ) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	if (current_cpu_datap()->cpu_hibernate) {
		/* Call hibernate_write_image() to put disk to low power state */
		hibernate_write_image();
		cpu_datap(0)->cpu_hibernate = 0;
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	IOCPURunPlatformQuiesceActions();

	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overridden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		if (wake_nkdbufs) {
			__kdebug_only uint64_t start = mach_absolute_time();
			kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE);
			KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), start);
		}
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}

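/*
 * Bounds of the real-mode bootstrap code that install_real_mode_bootstrap()
 * copies into the reserved low-memory page below.
 */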
extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];

void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
	    (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
	    real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches */
	__asm__("wbinvd");
}

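/*
 * TRUE if the system woke from sleep or deep idle within the last five
 * seconds, measured from the post-rebase wake timestamp.
 */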
boolean_t
ml_recent_wake(void)
{
	uint64_t ctime = mach_absolute_time();
	assert(ctime > acpi_wake_postrebase_abstime);
	return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC;
}