/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <i386/pmap.h>
30 #include <i386/proc_reg.h>
31 #include <i386/mp_desc.h>
32 #include <i386/misc_protos.h>
34 #include <i386/cpu_data.h>
36 #include <i386/mtrr.h>
39 #include <kern/hv_support.h>
42 #include <i386/vmx/vmx_cpu.h>
44 #include <i386/ucode.h>
45 #include <i386/acpi.h>
47 #include <i386/lapic.h>
49 #include <i386/mp_desc.h>
50 #include <i386/serial_io.h>
52 #include <i386/machine_check.h>
54 #include <i386/pmCPU.h>
58 #define UINT64 uint64_t
59 #define UINT32 uint32_t
60 #define UINT16 uint16_t
62 #define RSDP_VERSION_ACPI10 0
63 #define RSDP_VERSION_ACPI20 2
64 #include <acpi/Acpi.h>
65 #include <acpi/Acpi_v1.h>
66 #include <pexpert/i386/efi.h>
68 #include <kern/cpu_data.h>
69 #include <kern/machine.h>
70 #include <kern/timer_queue.h>
71 #include <console/serial_protos.h>
72 #include <machine/pal_routines.h>
73 #include <vm/vm_page.h>
76 #include <IOKit/IOHibernatePrivate.h>
78 #include <IOKit/IOPlatformExpert.h>
79 #include <sys/kdebug.h>
82 #include <kern/monotonic.h>
83 #endif /* MONOTONIC */
/* Platform sleep entry: saves CPU context, then invokes the callback with
 * refcon (see its use in acpi_sleep_kernel below); defined elsewhere. */
86 extern void acpi_sleep_cpu(acpi_sleep_callback
, void * refcon
);
/* Protected-mode wake entry installed by acpi_install_wake_handler(). */
87 extern void acpi_wake_prot(void);
/* IOKit platform hooks run around quiesce, wake, and halt/restart. */
89 extern kern_return_t
IOCPURunPlatformQuiesceActions(void);
90 extern kern_return_t
IOCPURunPlatformActiveActions(void);
91 extern kern_return_t
IOCPURunPlatformHaltRestartActions(uint32_t message
);
/* FP init — presumably re-initializes x87/SIMD state; TODO confirm. */
93 extern void fpinit(void);
/* Debug logging: DBG() forwards to kprintf on DEVELOPMENT/DEBUG kernels.
 * NOTE(review): the matching #else/#endif is not visible in this
 * fragment — presumably DBG compiles to nothing on release kernels. */
95 #if DEVELOPMENT || DEBUG
96 #define DBG(x...) kprintf(x)
102 acpi_install_wake_handler(void)
105 install_real_mode_bootstrap(acpi_wake_prot
);
106 return REAL_MODE_BOOTSTRAP_OFFSET
;
/* Saved kdebug state: tracing is disabled across sleep/deep idle and
 * restored from this on wake (set/read in the sleep and idle paths
 * below). */
114 unsigned int save_kdebug_enable
= 0;
/* Absolute (TSC-derived) timestamp taken just before platform sleep;
 * used to rebase the nanotime conversion on wake. */
115 static uint64_t acpi_sleep_abstime
;
/* Timestamp taken just before entering deep idle (S0 sleep). */
116 static uint64_t acpi_idle_abstime
;
/* Wake timestamps: raw, and taken after the TSC->nanotime rebase (the
 * latter is consulted by the recent-wake check near the end of this
 * file). */
117 static uint64_t acpi_wake_abstime
, acpi_wake_postrebase_abstime
;
/* When TRUE (default), nanotime is rebased on deep-idle wake so time
 * appears not to progress over the idle period; overridable via
 * sysctl -w tsc.deep_idle_rebase=0 (per the comment in
 * acpi_idle_kernel). */
118 boolean_t deep_idle_rebase
= TRUE
;
121 struct acpi_hibernate_callback_data
{
122 acpi_sleep_callback func
;
125 typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t
;
/*
 * Callback invoked (via acpi_sleep_cpu) once CPU state has been saved.
 * If this CPU is flagged for hibernation, writes the hibernate image
 * and acts on the post-write mode: power off, restart, or fall through
 * to normal sleep.  Finally runs platform quiesce actions, records the
 * sleep timestamp, and chains to the caller-supplied callback.
 * NOTE(review): this fragment is missing its return type, braces, and
 * declarations (e.g. `mode`); comments below annotate only the visible
 * lines.
 */
128 acpi_hibernate(void *refcon
)
132 acpi_hibernate_callback_data_t
*data
=
133 (acpi_hibernate_callback_data_t
*)refcon
;
/* Only the hibernate-flagged CPU writes the image. */
135 if (current_cpu_datap()->cpu_hibernate
) {
136 mode
= hibernate_write_image();
/* Image written; a hard power-off was requested. */
138 if (mode
== kIOHibernatePostWriteHalt
) {
140 HIBLOG("power off\n");
141 IOCPURunPlatformHaltRestartActions(kPEHaltCPU
);
142 if (PE_halt_restart
) {
143 (*PE_halt_restart
)(kPEHaltCPU
);
/* Image written; a restart was requested instead. */
145 } else if (mode
== kIOHibernatePostWriteRestart
) {
148 IOCPURunPlatformHaltRestartActions(kPERestartCPU
);
149 if (PE_halt_restart
) {
150 (*PE_halt_restart
)(kPERestartCPU
);
156 // should we come back via regular wake, set the state in memory.
157 cpu_datap(0)->cpu_hibernate
= 0;
/* Let the platform quiesce, then timestamp and enter sleep. */
166 IOCPURunPlatformQuiesceActions();
168 acpi_sleep_abstime
= mach_absolute_time();
/* Chain to the callback packaged by acpi_sleep_kernel. */
170 (data
->func
)(data
->refcon
);
172 /* should never get here! */
174 #endif /* HIBERNATION */
175 #endif /* CONFIG_SLEEP */
/* Slave-CPU start entry; reinstalled as the real-mode bootstrap target
 * after wake (see the end of acpi_sleep_kernel). */
177 extern void slave_pstart(void);
/*
 * Put the system to sleep (S3) or hibernate.
 * func/refcon: platform sleep callback, invoked directly or chained
 * through acpi_hibernate() once CPU state has been saved; does not
 * return until wake.  On wake: rebases timekeeping from the pre-sleep
 * timestamp, restores CPU features (fast syscall, machine check, MTRR,
 * microcode, PAT, VMX, local APIC), restarts tracing and timers, and
 * recreates the real-mode bootstrap used by slave CPU startup.
 * NOTE(review): this fragment is missing many lines (return type,
 * braces, #if guards, declarations of my_cpu/cpu/rc/start, etc.);
 * comments annotate only the visible statements.
 */
180 acpi_sleep_kernel(acpi_sleep_callback func
, void *refcon
)
183 acpi_hibernate_callback_data_t data
;
185 boolean_t did_hibernate
;
186 cpu_data_t
*cdp
= current_cpu_datap();
191 uint64_t elapsed
= 0;
192 uint64_t elapsed_trace_start
= 0;
194 my_cpu
= cpu_number();
195 kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp
->cpu_hibernate
,
198 /* Get all CPUs to be in the "off" state */
199 for (cpu
= 0; cpu
< real_ncpus
; cpu
+= 1) {
203 rc
= pmCPUExitHaltToOff(cpu
);
204 if (rc
!= KERN_SUCCESS
) {
205 panic("Error %d trying to transition CPU %d to OFF",
210 /* shutdown local APIC before passing control to firmware */
211 lapic_shutdown(true);
/* Package the callback for acpi_hibernate to chain to. */
215 data
.refcon
= refcon
;
220 #endif /* MONOTONIC */
222 /* Save power management timer state */
226 /* Notify hypervisor that we are about to sleep */
231 * Enable FPU/SIMD unit for potential hibernate acceleration
/* Open the hibernate/sleep trace interval, then park kdebug state so
 * tracing is off across the sleep (restored on wake below). */
235 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 0) | DBG_FUNC_START
);
237 save_kdebug_enable
= kdebug_enable
;
/* Timestamp used to rebase the TSC->nanotime conversion on wake. */
240 acpi_sleep_abstime
= mach_absolute_time();
244 * Save master CPU state and sleep platform.
245 * Will not return until platform is woken up,
246 * or if sleep failed.
248 uint64_t old_cr3
= x86_64_pre_sleep();
/* Hibernation path: chain through acpi_hibernate with `data`. */
250 acpi_sleep_cpu(acpi_hibernate
, &data
);
/* Non-hibernation path: call the platform callback directly. */
255 acpi_sleep_cpu(func
, refcon
);
/* ---- Execution resumes here on wake. ---- */
258 acpi_wake_abstime
= mach_absolute_time();
259 /* Rebase TSC->absolute time conversion, using timestamp
260 * recorded before sleep.
262 rtc_nanotime_init(acpi_sleep_abstime
);
263 acpi_wake_postrebase_abstime
= start
= mach_absolute_time();
264 assert(start
>= acpi_sleep_abstime
);
266 x86_64_post_sleep(old_cr3
);
268 #endif /* CONFIG_SLEEP */
270 /* Reset UART if kprintf is enabled.
271 * However kprintf should not be used before rtc_sleep_wakeup()
272 * for compatibility with firewire kprintf.
275 if (FALSE
== disable_serial_output
) {
/* Record whether we actually took the hibernate path. */
280 if (current_cpu_datap()->cpu_hibernate
) {
281 did_hibernate
= TRUE
;
285 did_hibernate
= FALSE
;
288 /* Re-enable fast syscall */
289 cpu_syscall_init(current_cpu_datap());
292 /* Re-enable machine check handling */
297 /* restore MTRR settings */
301 /* update CPU microcode and apply CPU workarounds */
302 ucode_update_wake_and_apply_cpu_was();
305 /* set up PAT following boot processor power up */
/* Restore VMX state; did_hibernate selects the resume flavor. */
313 vmx_resume(did_hibernate
);
317 * Go through all of the CPUs and mark them as requiring
323 /* re-enable and re-init local apic (prior to starting timers) */
325 lapic_configure(true);
330 * The sleep implementation uses indirect noreturn calls, so we miss stack
331 * unpoisoning. Do it explicitly.
333 kasan_unpoison_curstack(true);
/* Accumulate wake-path time for the trace event below. */
336 elapsed
+= mach_absolute_time() - start
;
338 rtc_decrementer_configure();
339 kdebug_enable
= save_kdebug_enable
;
341 if (kdebug_enable
== 0) {
342 elapsed_trace_start
+= kdebug_wake();
344 start
= mach_absolute_time();
346 /* Reconfigure FP/SIMD unit */
352 /* Notify hypervisor that we are about to resume */
356 IOCPURunPlatformActiveActions();
/* Close the hibernate/sleep trace interval with timing details. */
358 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 0) | DBG_FUNC_END
, start
, elapsed
,
359 elapsed_trace_start
, acpi_wake_abstime
);
361 /* Restore power management register state */
362 pmCPUMarkRunning(current_cpu_datap());
364 /* Restore power management timer state */
367 /* Restart timer interrupts */
372 #endif /* MONOTONIC */
375 kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate
);
376 #endif /* HIBERNATION */
379 /* Because we don't save the bootstrap page, and we share it
380 * between sleep and mp slave init, we need to recreate it
381 * after coming back from sleep or hibernate */
382 install_real_mode_bootstrap(slave_pstart
);
383 #endif /* CONFIG_SLEEP */
/*
 * Early hibernate-resume hook: rebuild the VM structures that were
 * torn down for the hibernate image, before normal operation resumes.
 * No-op on kernels built without HIBERNATION.
 * NOTE(review): return type and braces were missing from this
 * truncated fragment; reconstructed as a void function — confirm
 * against the declaring header.
 */
void
ml_hibernate_active_pre(void)
{
#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif /* HIBERNATION */
}
/*
 * Late hibernate-resume hook: if this CPU was restored from a
 * hibernate image, run hibernate_machine_init() (bracketed by
 * DBG_HIBERNATE trace events) and clear the cpu_hibernate flag so a
 * later regular wake is not mistaken for a hibernate resume.
 * No-op on kernels built without HIBERNATION.
 * NOTE(review): return type, braces, and the #if guard were missing
 * from this truncated fragment; reconstructed as a void function.
 */
void
ml_hibernate_active_post(void)
{
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */
}
/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
 * processors are expected already to have been offlined in the deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
/*
 * Idle the boot processor in the deepest C-state for S0 sleep (see the
 * contract in the block comment above).  Drains pending timer/PMI
 * interrupts (briefly re-enabling interrupts to do so), optionally
 * spins down the hibernate disk, runs platform quiesce actions, halts
 * via pmCPUHalt, and on wake rebases timekeeping (unless
 * deep_idle_rebase was cleared) and restarts timers.
 * NOTE(review): this fragment is missing lines (return type, braces,
 * #if MONOTONIC guards, the invocation of func/refcon); comments
 * annotate only the visible statements.
 */
420 acpi_idle_kernel(acpi_sleep_callback func
, void *refcon
)
422 boolean_t istate
= ml_get_interrupts_enabled();
424 kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
425 cpu_number(), istate
? "enabled" : "disabled");
/* Only ever called on the boot processor. */
427 assert(cpu_number() == master_cpu
);
430 mt_cpu_down(cpu_datap(0));
431 #endif /* MONOTONIC */
433 /* Cancel any pending deadline */
/* Dismiss in-flight timer (and perf-counter) interrupts: enable
 * interrupts so they can be taken, then disable before idling. */
435 while (lapic_is_interrupting(LAPIC_TIMER_VECTOR
)
437 || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT
))
438 #endif /* MONOTONIC */
440 (void) ml_set_interrupts_enabled(TRUE
);
442 ml_set_interrupts_enabled(FALSE
);
445 if (current_cpu_datap()->cpu_hibernate
) {
446 /* Call hibernate_write_image() to put disk to low power state */
447 hibernate_write_image();
448 cpu_datap(0)->cpu_hibernate
= 0;
452 * Call back to caller to indicate that interrupts will remain
453 * disabled while we deep idle, wake and return.
455 IOCPURunPlatformQuiesceActions();
/* Timestamp the idle entry for the rebase and trace points below. */
459 acpi_idle_abstime
= mach_absolute_time();
461 KERNEL_DEBUG_CONSTANT(
462 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_DEEP_IDLE
) | DBG_FUNC_START
,
463 acpi_idle_abstime
, deep_idle_rebase
, 0, 0, 0);
466 * Disable tracing during S0-sleep
467 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
469 if (deep_idle_rebase
) {
470 save_kdebug_enable
= kdebug_enable
;
475 * Call into power-management to enter the lowest C-state.
476 * Note when called on the boot processor this routine will
477 * return directly when awoken.
479 pmCPUHalt(PM_HALT_SLEEP
);
482 * Get wakeup time relative to the TSC which has progressed.
483 * Then rebase nanotime to reflect time not progressing over sleep
484 * - unless overridden so that tracing can occur during deep_idle.
486 acpi_wake_abstime
= mach_absolute_time();
487 if (deep_idle_rebase
) {
488 rtc_sleep_wakeup(acpi_idle_abstime
);
489 kdebug_enable
= save_kdebug_enable
;
491 acpi_wake_postrebase_abstime
= mach_absolute_time();
492 assert(mach_absolute_time() >= acpi_idle_abstime
);
494 KERNEL_DEBUG_CONSTANT(
495 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_DEEP_IDLE
) | DBG_FUNC_END
,
496 acpi_wake_abstime
, acpi_wake_abstime
- acpi_idle_abstime
, 0, 0, 0);
499 mt_cpu_up(cpu_datap(0));
500 #endif /* MONOTONIC */
502 /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
503 if (kdebug_enable
== 0) {
507 IOCPURunPlatformActiveActions();
509 /* Restart timer interrupts */
/* Bounds of the real-mode bootstrap code blob (provided elsewhere,
 * presumably by assembly/linker); copied into low memory by
 * install_real_mode_bootstrap() below. */
513 extern char real_mode_bootstrap_end
[];
514 extern char real_mode_bootstrap_base
[];
517 install_real_mode_bootstrap(void *prot_entry
)
520 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
521 * This is in page 1 which has been reserved for this purpose by
522 * machine_startup() from the boot processor.
523 * The slave boot code is responsible for switching to protected
524 * mode and then jumping to the common startup, _start().
526 bcopy_phys(kvtophys((vm_offset_t
) real_mode_bootstrap_base
),
527 (addr64_t
) REAL_MODE_BOOTSTRAP_OFFSET
,
528 real_mode_bootstrap_end
- real_mode_bootstrap_base
);
531 * Set the location at the base of the stack to point to the
532 * common startup entry.
535 PROT_MODE_START
+ REAL_MODE_BOOTSTRAP_OFFSET
,
536 (unsigned int)kvtophys((vm_offset_t
)prot_entry
));
545 uint64_t ctime
= mach_absolute_time();
546 assert(ctime
> acpi_wake_postrebase_abstime
);
547 return (ctime
- acpi_wake_postrebase_abstime
) < 5 * NSEC_PER_SEC
;
/*
 * Sum `size` bytes starting at `ptr`, modulo 256.
 * ACPI structures are laid out so that all bytes (including the stored
 * checksum byte) sum to zero, so a return of 0 means "checksum valid"
 * — see the `cksum8(...) == 0` tests in the table-search code below.
 * NOTE(review): the accumulator and return were missing from this
 * truncated fragment; restored as the standard 8-bit additive
 * checksum with a uint8_t result.
 */
static uint8_t
cksum8(uint8_t *ptr, uint32_t size)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < size; i++) {
		sum += ptr[i];
	}

	return sum;
}
/*
 * Parameterized search for a specified table given an sdtp (either RSDT or XSDT).
 * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT
 * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to
 * physical addresses here.
 */
/*
 * Expands to a statement block that scans the *SDT's TableOffsetEntry
 * array (entry_type is UINT32 for the RSDT, UINT64 for the XSDT) and
 * returns the first table whose 4-byte signature matches and whose
 * checksum is valid; returns NULL on a checksum failure or when no
 * entry matches.  Must be used inside a function returning
 * ACPI_TABLE_HEADER *.
 * NOTE(review): several continuation lines of this macro were missing
 * from the truncated fragment; the braces and returns below are
 * reconstructed — confirm against the project sources.
 */
#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \
{ \
	uint32_t i, pointer_count; \
 \
	/* Walk the list of tables in the *SDT, looking for the signature passed in */ \
	pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type); \
 \
	for (i = 0; i < pointer_count; i++) { \
		ACPI_TABLE_HEADER *next_table = \
		    (ACPI_TABLE_HEADER *)PHYSMAP_PTOV( \
			(uintptr_t)(sdtp)->TableOffsetEntry[i]); \
		if (strncmp(&next_table->Signature[0], (signature), 4) == 0) { \
			/* \
			 * Checksum the table first, then return it if the checksum \
			 * is valid. \
			 */ \
			if (cksum8((uint8_t *)next_table, next_table->Length) == 0) { \
				return next_table; \
			} else { \
				DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature), \
				    (unsigned long)(sdtp)->TableOffsetEntry[i]); \
				return NULL; \
			} \
		} \
	} \
 \
	return NULL; \
}
598 static ACPI_TABLE_HEADER
*
599 acpi_find_table_via_xsdt(XSDT_DESCRIPTOR
*xsdtp
, const char *signature
)
601 SEARCH_FOR_ACPI_TABLE(xsdtp
, signature
, UINT64
);
604 static ACPI_TABLE_HEADER
*
605 acpi_find_table_via_rsdt(RSDT_DESCRIPTOR
*rsdtp
, const char *signature
)
607 SEARCH_FOR_ACPI_TABLE(rsdtp
, signature
, UINT32
);
/*
 * Returns a pointer to an ACPI table header corresponding to the table
 * whose signature is passed in, or NULL if no such table could be found.
 */
/*
 * Returns the ACPI table whose 4-character signature is passed in, or
 * NULL if it cannot be found/validated.  Tables are located via the
 * XSDT (ACPI 2.0+, preferred) or RSDT (ACPI 1.0) reached from the RSDP
 * at rsdp_physaddr; validated pointers are cached in function statics
 * so subsequent calls skip revalidation.
 * NOTE(review): this fragment is missing lines (braces, early-return
 * statements, the final return); comments annotate only the visible
 * statements.
 */
614 static ACPI_TABLE_HEADER
*
615 acpi_find_table(uintptr_t rsdp_physaddr
, const char *signature
)
/* Cached across calls: validated RSDP plus whichever of XSDT/RSDT
 * was selected on first use. */
617 static RSDP_DESCRIPTOR
*rsdp
= NULL
;
618 static XSDT_DESCRIPTOR
*xsdtp
= NULL
;
619 static RSDT_DESCRIPTOR
*rsdtp
= NULL
;
621 if (signature
== NULL
) {
622 DBG("Invalid NULL signature passed to acpi_find_table\n");
627 * RSDT or XSDT is required; without it, we cannot locate other tables.
/* First call (or prior validation failed): map and validate the RSDP. */
629 if (__improbable(rsdp
== NULL
|| (rsdtp
== NULL
&& xsdtp
== NULL
))) {
630 rsdp
= PHYSMAP_PTOV(rsdp_physaddr
);
632 /* Verify RSDP signature */
633 if (__improbable(strncmp((void *)rsdp
, "RSD PTR ", 8) != 0)) {
634 DBG("RSDP signature mismatch: Aborting acpi_find_table\n");
639 /* Verify RSDP checksum */
640 if (__improbable(cksum8((uint8_t *)rsdp
, sizeof(RSDP_DESCRIPTOR
)) != 0)) {
641 DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n",
642 (unsigned long)rsdp_physaddr
);
647 /* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */
648 if (__improbable(rsdp
->Revision
>= RSDP_VERSION_ACPI20
&& rsdp
->XsdtPhysicalAddress
== 0ULL)) {
649 DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n");
652 } else if (__probable(rsdp
->Revision
>= RSDP_VERSION_ACPI20
)) {
653 /* XSDT (with 64-bit pointers to tables) */
655 xsdtp
= PHYSMAP_PTOV(rsdp
->XsdtPhysicalAddress
);
/* Reject the XSDT if its own checksum is bad. */
656 if (cksum8((uint8_t *)xsdtp
, xsdtp
->Length
) != 0) {
657 DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n",
658 (unsigned long)rsdp
->XsdtPhysicalAddress
);
662 } else if (__improbable(rsdp
->Revision
== RSDP_VERSION_ACPI10
&& rsdp
->RsdtPhysicalAddress
== 0)) {
663 DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n");
666 } else if (__improbable(rsdp
->Revision
== RSDP_VERSION_ACPI10
)) {
667 /* RSDT (with 32-bit pointers to tables) */
669 rsdtp
= PHYSMAP_PTOV((uintptr_t)rsdp
->RsdtPhysicalAddress
);
/* Reject the RSDT if its own checksum is bad. */
670 if (cksum8((uint8_t *)rsdtp
, rsdtp
->Length
) != 0) {
671 DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n",
672 (unsigned long)rsdp
->RsdtPhysicalAddress
);
677 DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n",
/* At this point at least one of the two SDT pointers must be valid. */
684 assert(xsdtp
!= NULL
|| rsdtp
!= NULL
);
/* Dispatch to the search helper matching the SDT flavor we kept. */
686 if (__probable(xsdtp
!= NULL
)) {
687 return acpi_find_table_via_xsdt(xsdtp
, signature
);
688 } else if (rsdtp
!= NULL
) {
689 return acpi_find_table_via_rsdt(rsdtp
, signature
);
/*
 * Returns the count of enabled logical processors present in the ACPI
 * MADT, or 0 if the MADT could not be located.
 */
/*
 * Count logical processors the ACPI MADT reports as enabled by walking
 * the variable-length entry list that follows the fixed MADT header.
 * Returns 0 when the RSDP cannot be obtained from EFI or the MADT
 * cannot be located.
 * NOTE(review): this fragment is missing lines (return type, braces,
 * case labels, the declaration of end_ptr, and the increment of
 * enabled_cpu_count); comments annotate only the visible statements.
 */
700 acpi_count_enabled_logical_processors(void)
702 MULTIPLE_APIC_TABLE
*madtp
;
704 APIC_HEADER
*next_apic_entryp
;
705 uint32_t enabled_cpu_count
= 0;
706 uint64_t rsdp_physaddr
;
/* The RSDP physical address is handed to us by EFI. */
708 rsdp_physaddr
= efi_get_rsdp_physaddr();
709 if (__improbable(rsdp_physaddr
== 0)) {
710 DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n");
/* Locate (and checksum-validate) the MADT via the RSDT/XSDT. */
714 madtp
= (MULTIPLE_APIC_TABLE
*)acpi_find_table(rsdp_physaddr
, ACPI_SIG_MADT
);
716 if (__improbable(madtp
== NULL
)) {
717 DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n");
/* Entries start right after the fixed header and run up to Length. */
721 end_ptr
= (void *)((uintptr_t)madtp
+ madtp
->Length
);
722 next_apic_entryp
= (APIC_HEADER
*)((uintptr_t)madtp
+ sizeof(MULTIPLE_APIC_TABLE
));
724 while ((void *)next_apic_entryp
< end_ptr
) {
725 switch (next_apic_entryp
->Type
) {
/* Processor local-APIC entry: counted if marked enabled. */
728 MADT_PROCESSOR_APIC
*madt_procp
= (MADT_PROCESSOR_APIC
*)next_apic_entryp
;
729 if (madt_procp
->ProcessorEnabled
) {
/* All other entry types are skipped (logged for debugging). */
737 DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp
->Type
,
738 next_apic_entryp
->Length
);
/* Advance by this entry's self-reported length. */
742 next_apic_entryp
= (APIC_HEADER
*)((uintptr_t)next_apic_entryp
+ next_apic_entryp
->Length
);
745 return enabled_cpu_count
;