#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>
+#if MONOTONIC
+#include <kern/monotonic.h>
+#endif /* MONOTONIC */
+
#if CONFIG_SLEEP
extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
extern void acpi_wake_prot(void);
}
}
+
+#if CONFIG_VMX
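+	/* Turn off VT, otherwise switching to legacy mode will fail */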
+ vmx_suspend();
+#endif
kdebug_enable = 0;
IOCPURunPlatformQuiesceActions();
extern void slave_pstart(void);
extern void hibernate_rebuild_vm_structs(void);
extern unsigned int wake_nkdbufs;
+extern unsigned int trace_wrap;
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
acpi_hibernate_callback_data_t data;
#endif
boolean_t did_hibernate;
+ cpu_data_t *cdp = current_cpu_datap();
unsigned int cpu;
kern_return_t rc;
unsigned int my_cpu;
uint64_t elapsed = 0;
uint64_t elapsed_trace_start = 0;
- kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
- current_cpu_datap()->cpu_hibernate, cpu_number());
+ my_cpu = cpu_number();
+ kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate,
+ my_cpu);
- /* Get all CPUs to be in the "off" state */
- my_cpu = cpu_number();
+ /* Get all CPUs to be in the "off" state */
for (cpu = 0; cpu < real_ncpus; cpu += 1) {
if (cpu == my_cpu)
continue;
rc = pmCPUExitHaltToOff(cpu);
if (rc != KERN_SUCCESS)
data.refcon = refcon;
#endif
+#if MONOTONIC
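+	/* Tear down this CPU's monotonic (fixed performance counter) state before it sleeps */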
+ mt_cpu_down(cdp);
+#endif /* MONOTONIC */
+
/* Save power management timer state */
pmTimerSave();
hv_suspend();
#endif
-#if CONFIG_VMX
- /*
- * Turn off VT, otherwise switching to legacy mode will fail
- */
- vmx_suspend();
-#endif
-
/*
* Enable FPU/SIMD unit for potential hibernate acceleration
*/
clear_ts();
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);
save_kdebug_enable = kdebug_enable;
kdebug_enable = 0;
#if HIBERNATION
acpi_sleep_cpu(acpi_hibernate, &data);
#else
+#if CONFIG_VMX
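+	/* Turn off VT, otherwise switching to legacy mode will fail */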
+ vmx_suspend();
+#endif
acpi_sleep_cpu(func, refcon);
#endif
- start = mach_absolute_time();
+ acpi_wake_abstime = mach_absolute_time();
+ /* Rebase TSC->absolute time conversion, using timestamp
+ * recorded before sleep.
+ */
+ rtc_nanotime_init(acpi_sleep_abstime);
+ acpi_wake_postrebase_abstime = start = mach_absolute_time();
+ assert(start >= acpi_sleep_abstime);
x86_64_post_sleep(old_cr3);
did_hibernate = FALSE;
}
- /* Re-enable mode (including 64-bit if applicable) */
- cpu_mode_init(current_cpu_datap());
+ /* Re-enable fast syscall */
+ cpu_syscall_init(current_cpu_datap());
#if CONFIG_MCA
/* Re-enable machine check handling */
/* update CPU microcode */
ucode_update_wake();
+#if CONFIG_MTRR
+ /* set up PAT following boot processor power up */
+ pat_init();
+#endif
+
#if CONFIG_VMX
/*
* Restore VT mode
*/
- vmx_resume();
-#endif
-
-#if CONFIG_MTRR
- /* set up PAT following boot processor power up */
- pat_init();
+ vmx_resume(did_hibernate);
#endif
/*
if (lapic_probe())
lapic_configure();
+#if KASAN
+ /*
+ * The sleep implementation uses indirect noreturn calls, so we miss stack
+ * unpoisoning. Do it explicitly.
+ */
+ kasan_unpoison_curstack(true);
+#endif
+
#if HIBERNATION
hibernate_rebuild_vm_structs();
#endif
elapsed += mach_absolute_time() - start;
- acpi_wake_abstime = mach_absolute_time();
-
- /* let the realtime clock reset */
- rtc_sleep_wakeup(acpi_sleep_abstime);
- acpi_wake_postrebase_abstime = mach_absolute_time();
- assert(mach_absolute_time() >= acpi_sleep_abstime);
+ rtc_decrementer_configure();
kdebug_enable = save_kdebug_enable;
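+	/* Turn tracing back on if the trace_wake boot-arg requested wake-time tracing */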
if (kdebug_enable == 0) {
if (wake_nkdbufs) {
start = mach_absolute_time();
- start_kern_tracing(wake_nkdbufs, TRUE);
+ kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE);
elapsed_trace_start += mach_absolute_time() - start;
}
}
#if HIBERNATION
if (did_hibernate) {
- elapsed += mach_absolute_time() - start;
-
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START, elapsed, elapsed_trace_start, 0, 0, 0);
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
hibernate_machine_init();
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
current_cpu_datap()->cpu_hibernate = 0;
-
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
- } else
+ }
#endif /* HIBERNATION */
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
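+	/* Log the end of the wake path along with the timing gathered above */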
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
+ elapsed_trace_start, acpi_wake_abstime);
/* Restore power management register state */
pmCPUMarkRunning(current_cpu_datap());
assert(cpu_number() == master_cpu);
- /*
- * Effectively set the boot cpu offline.
- * This will stop further deadlines being set.
- */
- cpu_datap(master_cpu)->cpu_running = FALSE;
-
/* Cancel any pending deadline */
setPop(0);
while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
ml_set_interrupts_enabled(FALSE);
}
+ if (current_cpu_datap()->cpu_hibernate) {
+ /* Call hibernate_write_image() to put disk to low power state */
+ hibernate_write_image();
+ cpu_datap(0)->cpu_hibernate = 0;
+ }
+
/*
* Call back to caller to indicate that interrupts will remain
* disabled while we deep idle, wake and return.
*/
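+	/* Run the registered platform quiesce actions before entering deep idle */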
+ IOCPURunPlatformQuiesceActions();
+
func(refcon);
acpi_idle_abstime = mach_absolute_time();
}
acpi_wake_postrebase_abstime = mach_absolute_time();
assert(mach_absolute_time() >= acpi_idle_abstime);
- cpu_datap(master_cpu)->cpu_running = TRUE;
KERNEL_DEBUG_CONSTANT(
MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
if (kdebug_enable == 0) {
- if (wake_nkdbufs)
- start_kern_tracing(wake_nkdbufs, TRUE);
+ if (wake_nkdbufs) {
+ __kdebug_only uint64_t start = mach_absolute_time();
+ kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE);
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), start);
+ }
}
IOCPURunPlatformActiveActions();