/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if HYPERVISOR
#include <kern/hv_support.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

/* ACPI table definitions use UINT* names; map them to fixed-width types */
#define UINT64 uint64_t
#define UINT32 uint32_t
#define UINT16 uint16_t
#define RSDP_VERSION_ACPI10     0
#define RSDP_VERSION_ACPI20     2
#include <acpi/Acpi.h>
#include <acpi/Acpi_v1.h>
#include <pexpert/i386/efi.h>

#include <kern/cpu_data.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
86 extern void acpi_sleep_cpu(acpi_sleep_callback
, void * refcon
);
87 extern void acpi_wake_prot(void);
89 extern kern_return_t
IOCPURunPlatformQuiesceActions(void);
90 extern kern_return_t
IOCPURunPlatformActiveActions(void);
91 extern kern_return_t
IOCPURunPlatformHaltRestartActions(uint32_t message
);
93 extern void fpinit(void);
95 #if DEVELOPMENT || DEBUG
96 #define DBG(x...) kprintf(x)
102 acpi_install_wake_handler(void)
105 install_real_mode_bootstrap(acpi_wake_prot
);
106 return REAL_MODE_BOOTSTRAP_OFFSET
;
114 unsigned int save_kdebug_enable
= 0;
115 static uint64_t acpi_sleep_abstime
;
116 static uint64_t acpi_idle_abstime
;
117 static uint64_t acpi_wake_abstime
, acpi_wake_postrebase_abstime
;
118 boolean_t deep_idle_rebase
= TRUE
;
#if CONFIG_SLEEP
#if HIBERNATION
/*
 * Context threaded through acpi_sleep_cpu() to acpi_hibernate() so the
 * original platform sleep callback and its refcon can be invoked after
 * the hibernate image has been written.
 */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;   /* platform sleep callback to chain to */
	void *refcon;               /* opaque argument for func */
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

/*
 * Callback passed to acpi_sleep_cpu() when hibernation is configured:
 * writes the hibernate image, honors the post-write policy
 * (halt / restart / fall through to sleep), then chains to the original
 * sleep callback. Control does not return here.
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
	    (acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate) {
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt) {
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPEHaltCPU);
			}
		} else if (mode == kIOHibernatePostWriteRestart) {
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPERestartCPU);
			}
		} else {
			// mode == kIOHibernatePostWriteSleep
			//
			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}
	}

#if CONFIG_VMX
	vmx_suspend();
#endif
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
#endif /* HIBERNATION */
#endif /* CONFIG_SLEEP */
/* Slave-CPU start entry, reinstalled into the bootstrap page after wake */
extern void slave_pstart(void);
180 acpi_sleep_kernel(acpi_sleep_callback func
, void *refcon
)
183 acpi_hibernate_callback_data_t data
;
185 boolean_t did_hibernate
;
186 cpu_data_t
*cdp
= current_cpu_datap();
191 uint64_t elapsed
= 0;
192 uint64_t elapsed_trace_start
= 0;
194 my_cpu
= cpu_number();
195 kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp
->cpu_hibernate
,
198 /* Get all CPUs to be in the "off" state */
199 for (cpu
= 0; cpu
< real_ncpus
; cpu
+= 1) {
203 rc
= pmCPUExitHaltToOff(cpu
);
204 if (rc
!= KERN_SUCCESS
) {
205 panic("Error %d trying to transition CPU %d to OFF",
210 /* shutdown local APIC before passing control to firmware */
211 lapic_shutdown(true);
215 data
.refcon
= refcon
;
220 #endif /* MONOTONIC */
222 /* Save power management timer state */
226 /* Notify hypervisor that we are about to sleep */
231 * Enable FPU/SIMD unit for potential hibernate acceleration
235 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 0) | DBG_FUNC_START
);
237 save_kdebug_enable
= kdebug_enable
;
240 acpi_sleep_abstime
= mach_absolute_time();
244 * Save master CPU state and sleep platform.
245 * Will not return until platform is woken up,
246 * or if sleep failed.
248 uint64_t old_cr3
= x86_64_pre_sleep();
250 acpi_sleep_cpu(acpi_hibernate
, &data
);
255 acpi_sleep_cpu(func
, refcon
);
258 acpi_wake_abstime
= mach_absolute_time();
259 /* Rebase TSC->absolute time conversion, using timestamp
260 * recorded before sleep.
262 rtc_nanotime_init(acpi_sleep_abstime
);
263 acpi_wake_postrebase_abstime
= start
= mach_absolute_time();
264 assert(start
>= acpi_sleep_abstime
);
266 x86_64_post_sleep(old_cr3
);
268 #endif /* CONFIG_SLEEP */
270 /* Reset UART if kprintf is enabled.
271 * However kprintf should not be used before rtc_sleep_wakeup()
272 * for compatibility with firewire kprintf.
275 if (FALSE
== disable_serial_output
) {
280 if (current_cpu_datap()->cpu_hibernate
) {
281 did_hibernate
= TRUE
;
285 did_hibernate
= FALSE
;
288 /* Re-enable fast syscall */
289 cpu_syscall_init(current_cpu_datap());
292 /* Re-enable machine check handling */
297 /* restore MTRR settings */
301 /* update CPU microcode and apply CPU workarounds */
302 ucode_update_wake_and_apply_cpu_was();
305 /* set up PAT following boot processor power up */
313 vmx_resume(did_hibernate
);
317 * Go through all of the CPUs and mark them as requiring
323 /* re-enable and re-init local apic (prior to starting timers) */
325 lapic_configure(true);
330 * The sleep implementation uses indirect noreturn calls, so we miss stack
331 * unpoisoning. Do it explicitly.
333 kasan_unpoison_curstack(true);
336 elapsed
+= mach_absolute_time() - start
;
338 rtc_decrementer_configure();
339 kdebug_enable
= save_kdebug_enable
;
341 if (kdebug_enable
== 0) {
342 elapsed_trace_start
+= kdebug_wake();
344 start
= mach_absolute_time();
346 /* Reconfigure FP/SIMD unit */
350 IOCPURunPlatformActiveActions();
352 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 0) | DBG_FUNC_END
, start
, elapsed
,
353 elapsed_trace_start
, acpi_wake_abstime
);
355 /* Restore power management register state */
356 pmCPUMarkRunning(current_cpu_datap());
358 /* Restore power management timer state */
361 /* Restart timer interrupts */
367 #endif /* MONOTONIC */
370 kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate
);
371 #endif /* HIBERNATION */
374 /* Becase we don't save the bootstrap page, and we share it
375 * between sleep and mp slave init, we need to recreate it
376 * after coming back from sleep or hibernate */
377 install_real_mode_bootstrap(slave_pstart
);
378 #endif /* CONFIG_SLEEP */
/*
 * Called before hibernation resume processing: rebuild the VM structures
 * that describe memory so the image restore can proceed.
 */
void
ml_hibernate_active_pre(void)
{
#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif /* HIBERNATION */
}
/*
 * Called after hibernation resume: finish machine-level hibernate
 * initialization and clear the per-CPU hibernate flag.
 */
void
ml_hibernate_active_post(void)
{
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */
}
403 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
404 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
405 * processors are expected already to have been offlined in the deepest C-state.
407 * The contract with ACPI is that although the kernel is called with interrupts
408 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
409 * interrupt. However, the callback function will be called once this has
410 * occurred and interrupts are guaranteed to be disabled at that time,
411 * and to remain disabled during C-state entry, exit (wake) and return
412 * from acpi_idle_kernel.
415 acpi_idle_kernel(acpi_sleep_callback func
, void *refcon
)
417 boolean_t istate
= ml_get_interrupts_enabled();
419 kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
420 cpu_number(), istate
? "enabled" : "disabled");
422 assert(cpu_number() == master_cpu
);
425 mt_cpu_down(cpu_datap(0));
426 #endif /* MONOTONIC */
428 /* Cancel any pending deadline */
430 while (lapic_is_interrupting(LAPIC_TIMER_VECTOR
)
432 || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT
))
433 #endif /* MONOTONIC */
435 (void) ml_set_interrupts_enabled(TRUE
);
437 ml_set_interrupts_enabled(FALSE
);
440 if (current_cpu_datap()->cpu_hibernate
) {
441 /* Call hibernate_write_image() to put disk to low power state */
442 hibernate_write_image();
443 cpu_datap(0)->cpu_hibernate
= 0;
447 * Call back to caller to indicate that interrupts will remain
448 * disabled while we deep idle, wake and return.
450 IOCPURunPlatformQuiesceActions();
454 acpi_idle_abstime
= mach_absolute_time();
456 KERNEL_DEBUG_CONSTANT(
457 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_DEEP_IDLE
) | DBG_FUNC_START
,
458 acpi_idle_abstime
, deep_idle_rebase
, 0, 0, 0);
461 * Disable tracing during S0-sleep
462 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
464 if (deep_idle_rebase
) {
465 save_kdebug_enable
= kdebug_enable
;
470 * Call into power-management to enter the lowest C-state.
471 * Note when called on the boot processor this routine will
472 * return directly when awoken.
474 pmCPUHalt(PM_HALT_SLEEP
);
477 * Get wakeup time relative to the TSC which has progressed.
478 * Then rebase nanotime to reflect time not progressing over sleep
479 * - unless overriden so that tracing can occur during deep_idle.
481 acpi_wake_abstime
= mach_absolute_time();
482 if (deep_idle_rebase
) {
483 rtc_sleep_wakeup(acpi_idle_abstime
);
484 kdebug_enable
= save_kdebug_enable
;
486 acpi_wake_postrebase_abstime
= mach_absolute_time();
487 assert(mach_absolute_time() >= acpi_idle_abstime
);
489 KERNEL_DEBUG_CONSTANT(
490 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_DEEP_IDLE
) | DBG_FUNC_END
,
491 acpi_wake_abstime
, acpi_wake_abstime
- acpi_idle_abstime
, 0, 0, 0);
494 mt_cpu_up(cpu_datap(0));
495 #endif /* MONOTONIC */
497 /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
498 if (kdebug_enable
== 0) {
502 IOCPURunPlatformActiveActions();
504 /* Restart timer interrupts */
/* Bounds of the real-mode bootstrap trampoline code (defined in assembly) */
extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];
512 install_real_mode_bootstrap(void *prot_entry
)
515 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
516 * This is in page 1 which has been reserved for this purpose by
517 * machine_startup() from the boot processor.
518 * The slave boot code is responsible for switching to protected
519 * mode and then jumping to the common startup, _start().
521 bcopy_phys(kvtophys((vm_offset_t
) real_mode_bootstrap_base
),
522 (addr64_t
) REAL_MODE_BOOTSTRAP_OFFSET
,
523 real_mode_bootstrap_end
- real_mode_bootstrap_base
);
526 * Set the location at the base of the stack to point to the
527 * common startup entry.
530 PROT_MODE_START
+ REAL_MODE_BOOTSTRAP_OFFSET
,
531 (unsigned int)kvtophys((vm_offset_t
)prot_entry
));
540 uint64_t ctime
= mach_absolute_time();
541 assert(ctime
> acpi_wake_postrebase_abstime
);
542 return (ctime
- acpi_wake_postrebase_abstime
) < 5 * NSEC_PER_SEC
;
/*
 * 8-bit additive checksum over 'size' bytes at 'ptr' (mod-256 sum).
 * ACPI tables are valid when the sum of all their bytes is 0.
 * Returns 0 for a zero-length range.
 */
static uint8_t
cksum8(uint8_t *ptr, uint32_t size)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < size; i++) {
		sum += ptr[i];
	}

	return sum;
}
/*
 * Parameterized search for a specified table given an sdtp (either RSDT or XSDT).
 * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT
 * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to
 * physical.
 *
 * Expands to the body of a function returning ACPI_TABLE_HEADER *:
 * yields the matching table if its checksum is valid, otherwise NULL.
 */
#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \
{ \
	uint32_t i, pointer_count; \
 \
	/* Walk the list of tables in the *SDT, looking for the signature passed in */ \
	pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type); \
 \
	for (i = 0; i < pointer_count; i++) { \
		ACPI_TABLE_HEADER *next_table = \
		    (ACPI_TABLE_HEADER *)PHYSMAP_PTOV( \
			(uintptr_t)(sdtp)->TableOffsetEntry[i]); \
		if (strncmp(&next_table->Signature[0], (signature), 4) == 0) { \
			/* \
			 * Checksum the table first, then return it if the checksum \
			 * is valid. \
			 */ \
			if (cksum8((uint8_t *)next_table, next_table->Length) == 0) { \
				return next_table; \
			} else { \
				DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature), \
				    (unsigned long)(sdtp)->TableOffsetEntry[i]); \
				return NULL; \
			} \
		} \
	} \
 \
	return NULL; \
}
593 static ACPI_TABLE_HEADER
*
594 acpi_find_table_via_xsdt(XSDT_DESCRIPTOR
*xsdtp
, const char *signature
)
596 SEARCH_FOR_ACPI_TABLE(xsdtp
, signature
, UINT64
);
599 static ACPI_TABLE_HEADER
*
600 acpi_find_table_via_rsdt(RSDT_DESCRIPTOR
*rsdtp
, const char *signature
)
602 SEARCH_FOR_ACPI_TABLE(rsdtp
, signature
, UINT32
);
606 * Returns a pointer to an ACPI table header corresponding to the table
607 * whose signature is passed in, or NULL if no such table could be found.
609 static ACPI_TABLE_HEADER
*
610 acpi_find_table(uintptr_t rsdp_physaddr
, const char *signature
)
612 static RSDP_DESCRIPTOR
*rsdp
= NULL
;
613 static XSDT_DESCRIPTOR
*xsdtp
= NULL
;
614 static RSDT_DESCRIPTOR
*rsdtp
= NULL
;
616 if (signature
== NULL
) {
617 DBG("Invalid NULL signature passed to acpi_find_table\n");
622 * RSDT or XSDT is required; without it, we cannot locate other tables.
624 if (__improbable(rsdp
== NULL
|| (rsdtp
== NULL
&& xsdtp
== NULL
))) {
625 rsdp
= PHYSMAP_PTOV(rsdp_physaddr
);
627 /* Verify RSDP signature */
628 if (__improbable(strncmp((void *)rsdp
, "RSD PTR ", 8) != 0)) {
629 DBG("RSDP signature mismatch: Aborting acpi_find_table\n");
634 /* Verify RSDP checksum */
635 if (__improbable(cksum8((uint8_t *)rsdp
, sizeof(RSDP_DESCRIPTOR
)) != 0)) {
636 DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n",
637 (unsigned long)rsdp_physaddr
);
642 /* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */
643 if (__improbable(rsdp
->Revision
>= RSDP_VERSION_ACPI20
&& rsdp
->XsdtPhysicalAddress
== 0ULL)) {
644 DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n");
647 } else if (__probable(rsdp
->Revision
>= RSDP_VERSION_ACPI20
)) {
648 /* XSDT (with 64-bit pointers to tables) */
650 xsdtp
= PHYSMAP_PTOV(rsdp
->XsdtPhysicalAddress
);
651 if (cksum8((uint8_t *)xsdtp
, xsdtp
->Length
) != 0) {
652 DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n",
653 (unsigned long)rsdp
->XsdtPhysicalAddress
);
657 } else if (__improbable(rsdp
->Revision
== RSDP_VERSION_ACPI10
&& rsdp
->RsdtPhysicalAddress
== 0)) {
658 DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n");
661 } else if (__improbable(rsdp
->Revision
== RSDP_VERSION_ACPI10
)) {
662 /* RSDT (with 32-bit pointers to tables) */
664 rsdtp
= PHYSMAP_PTOV((uintptr_t)rsdp
->RsdtPhysicalAddress
);
665 if (cksum8((uint8_t *)rsdtp
, rsdtp
->Length
) != 0) {
666 DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n",
667 (unsigned long)rsdp
->RsdtPhysicalAddress
);
672 DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n",
679 assert(xsdtp
!= NULL
|| rsdtp
!= NULL
);
681 if (__probable(xsdtp
!= NULL
)) {
682 return acpi_find_table_via_xsdt(xsdtp
, signature
);
683 } else if (rsdtp
!= NULL
) {
684 return acpi_find_table_via_rsdt(rsdtp
, signature
);
691 * Returns the count of enabled logical processors present in the ACPI
692 * MADT, or 0 if the MADT could not be located.
695 acpi_count_enabled_logical_processors(void)
697 MULTIPLE_APIC_TABLE
*madtp
;
699 APIC_HEADER
*next_apic_entryp
;
700 uint32_t enabled_cpu_count
= 0;
701 uint64_t rsdp_physaddr
;
703 rsdp_physaddr
= efi_get_rsdp_physaddr();
704 if (__improbable(rsdp_physaddr
== 0)) {
705 DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n");
709 madtp
= (MULTIPLE_APIC_TABLE
*)acpi_find_table(rsdp_physaddr
, ACPI_SIG_MADT
);
711 if (__improbable(madtp
== NULL
)) {
712 DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n");
716 end_ptr
= (void *)((uintptr_t)madtp
+ madtp
->Length
);
717 next_apic_entryp
= (APIC_HEADER
*)((uintptr_t)madtp
+ sizeof(MULTIPLE_APIC_TABLE
));
719 while ((void *)next_apic_entryp
< end_ptr
) {
720 switch (next_apic_entryp
->Type
) {
723 MADT_PROCESSOR_APIC
*madt_procp
= (MADT_PROCESSOR_APIC
*)next_apic_entryp
;
724 if (madt_procp
->ProcessorEnabled
) {
732 DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp
->Type
,
733 next_apic_entryp
->Length
);
737 next_apic_entryp
= (APIC_HEADER
*)((uintptr_t)next_apic_entryp
+ next_apic_entryp
->Length
);
740 return enabled_cpu_count
;