X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/e2fac8b15b12a7979f72090454d850e612fc5b13..7ddcb079202367355dddccdfa4318e57d50318be:/osfmk/i386/acpi.c?ds=sidebyside

diff --git a/osfmk/i386/acpi.c b/osfmk/i386/acpi.c
index 17143604c..f13561244 100644
--- a/osfmk/i386/acpi.c
+++ b/osfmk/i386/acpi.c
@@ -26,50 +26,60 @@
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
+#include 
+#include 
+#include 
 #include 
+#include 
 #include 
-#include 
-#include 
+#if CONFIG_MTRR
 #include 
+#endif
+#if CONFIG_VMX
 #include 
+#endif
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#if CONFIG_MCA
 #include 
+#endif
 #include 
+#include 
+
 #include 
 #include 
+#include 
+#include 
 
 #if HIBERNATION
 #include 
 #endif
 #include 
 
-extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
-extern char acpi_wake_start[];
-extern char acpi_wake_end[];
+#include 
 
-extern void set_kbd_leds(int leds);
+#if CONFIG_SLEEP
+extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
+extern void acpi_wake_prot(void);
+#endif
 
 extern void fpinit(void);
 
 vm_offset_t
 acpi_install_wake_handler(void)
 {
-    /* copy wake code to ACPI_WAKE_ADDR in low memory */
-    bcopy_phys(kvtophys((vm_offset_t)acpi_wake_start),
-               (addr64_t) ACPI_WAKE_ADDR,
-               acpi_wake_end - acpi_wake_start);
-
-    /* flush cache */
-    wbinvd();
-
-    /* return physical address of the wakeup code */
-    return ACPI_WAKE_ADDR;
+#if CONFIG_SLEEP
+    install_real_mode_bootstrap(acpi_wake_prot);
+    return REAL_MODE_BOOTSTRAP_OFFSET;
+#else
+    return 0;
+#endif
 }
@@ -79,6 +89,11 @@ struct acpi_hibernate_callback_data {
 };
 typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
 
+unsigned int save_kdebug_enable = 0;
+static uint64_t acpi_sleep_abstime;
+
+
+#if CONFIG_SLEEP
 static void
 acpi_hibernate(void *refcon)
 {
@@ -89,8 +104,9 @@ acpi_hibernate(void *refcon)
 
     if (current_cpu_datap()->cpu_hibernate)
     {
+#if defined(__i386__)
         cpu_IA32e_enable(current_cpu_datap());
-
+#endif
         mode = hibernate_write_image();
 
         if( mode == kIOHibernatePostWriteHalt )
@@ -114,47 +130,56 @@ acpi_hibernate(void *refcon)
             cpu_datap(0)->cpu_hibernate = 0;
         }
 
+#if defined(__i386__)
         /*
          * If we're in 64-bit mode, drop back into legacy mode during sleep.
          */
         cpu_IA32e_disable(current_cpu_datap());
-
+#endif
     }
+    kdebug_enable = 0;
+
+    acpi_sleep_abstime = mach_absolute_time();
 
     (data->func)(data->refcon);
 
     /* should never get here! */
 }
-#endif
+#endif /* CONFIG_SLEEP */
+#endif /* HIBERNATION */
+
+extern void slave_pstart(void);
 
-static uint64_t acpi_sleep_abstime;
 
 void
 acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
 {
 #if HIBERNATION
     acpi_hibernate_callback_data_t data;
-    boolean_t did_hibernate;
 #endif
+    boolean_t did_hibernate;
     unsigned int cpu;
     kern_return_t rc;
     unsigned int my_cpu;
+    uint64_t now;
+    uint64_t my_tsc;
+    uint64_t my_abs;
 
-    kprintf("acpi_sleep_kernel hib=%d\n",
-        current_cpu_datap()->cpu_hibernate);
+    kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
            current_cpu_datap()->cpu_hibernate, cpu_number());
 
-    /* Geta ll CPUs to be in the "off" state */
-    my_cpu = cpu_number();
+    /* Get all CPUs to be in the "off" state */
+    my_cpu = cpu_number();
     for (cpu = 0; cpu < real_ncpus; cpu += 1) {
         if (cpu == my_cpu)
             continue;
         rc = pmCPUExitHaltToOff(cpu);
         if (rc != KERN_SUCCESS)
-            panic("Error %d trying to transition CPU %d to OFF",
-                  rc, cpu);
+            panic("Error %d trying to transition CPU %d to OFF",
+                rc, cpu);
     }
 
-    /* shutdown local APIC before passing control to BIOS */
+    /* shutdown local APIC before passing control to firmware */
     lapic_shutdown();
 
 #if HIBERNATION
@@ -165,43 +190,67 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
     /* Save power management timer state */
     pmTimerSave();
 
+#if CONFIG_VMX
     /*
      * Turn off VT, otherwise switching to legacy mode will fail
      */
     vmx_suspend();
+#endif
 
+#if defined(__i386__)
     /*
      * If we're in 64-bit mode, drop back into legacy mode during sleep.
     */
     cpu_IA32e_disable(current_cpu_datap());
+#endif
+    /*
+     * Enable FPU/SIMD unit for potential hibernate acceleration
+     */
+    clear_ts();
+
+    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+    save_kdebug_enable = kdebug_enable;
+    kdebug_enable = 0;
 
     acpi_sleep_abstime = mach_absolute_time();
 
+#if CONFIG_SLEEP
     /*
      * Save master CPU state and sleep platform.
      * Will not return until platform is woken up,
      * or if sleep failed.
     */
+#ifdef __x86_64__
+    uint64_t old_cr3 = x86_64_pre_sleep();
+#endif
 #if HIBERNATION
     acpi_sleep_cpu(acpi_hibernate, &data);
 #else
     acpi_sleep_cpu(func, refcon);
 #endif
+#ifdef __x86_64__
+    x86_64_post_sleep(old_cr3);
+#endif
+
+#endif /* CONFIG_SLEEP */
+
     /* Reset UART if kprintf is enabled.
      * However kprintf should not be used before rtc_sleep_wakeup()
      * for compatibility with firewire kprintf.
     */
 
     if (FALSE == disable_serial_output)
-        serial_init();
+        pal_serial_init();
 
 #if HIBERNATION
     if (current_cpu_datap()->cpu_hibernate) {
+#if defined(__i386__)
         int i;
         for (i = 0; i < PMAP_NWINDOWS; i++)
             *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
-        current_cpu_datap()->cpu_hibernate = 0;
+#endif
         did_hibernate = TRUE;
 
     } else
@@ -213,19 +262,30 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
     /* Re-enable mode (including 64-bit if applicable) */
     cpu_mode_init(current_cpu_datap());
 
+#if CONFIG_MCA
     /* Re-enable machine check handling */
     mca_cpu_init();
+#endif
 
+#if CONFIG_MTRR
     /* restore MTRR settings */
     mtrr_update_cpu();
+#endif
+
+    /* update CPU microcode */
+    ucode_update_wake();
 
+#if CONFIG_VMX
     /*
      * Restore VT mode
     */
     vmx_resume();
+#endif
 
+#if CONFIG_MTRR
     /* set up PAT following boot processor power up */
     pat_init();
+#endif
 
     /*
      * Go through all of the CPUs and mark them as requiring
@@ -233,15 +293,32 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
     */
     pmMarkAllCPUsOff();
 
+    ml_get_timebase(&now);
+
+    /* re-enable and re-init local apic (prior to starting timers) */
+    if (lapic_probe())
+        lapic_configure();
+
     /* let the realtime clock reset */
     rtc_sleep_wakeup(acpi_sleep_abstime);
 
-    if (did_hibernate)
+    kdebug_enable = save_kdebug_enable;
+
+    if (did_hibernate) {
+
+        my_tsc = (now >> 32) | (now << 32);
+        my_abs = tmrCvt(my_tsc, tscFCvtt2n);
+
+        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START,
+            (uint32_t)(my_abs >> 32), (uint32_t)my_abs, 0, 0, 0);
         hibernate_machine_init();
+        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);
 
-    /* re-enable and re-init local apic */
-    if (lapic_probe())
-        lapic_configure();
+        current_cpu_datap()->cpu_hibernate = 0;
+
+        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+    } else
+        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
 
     /* Restore power management register state */
     pmCPUMarkRunning(current_cpu_datap());
@@ -249,16 +326,59 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
     /* Restore power management timer state */
     pmTimerRestore();
 
-    /* Restart tick interrupts from the LAPIC timer */
-    rtc_lapic_start_ticking();
+    /* Restart timer interrupts */
+    rtc_timer_start();
 
-    fpinit();
-    clear_fpu();
+    /* Reconfigure FP/SIMD unit */
+    init_fpu();
 
 #if HIBERNATION
+#ifdef __i386__
+    /* The image is written out using the copy engine, which disables
+     * preemption. Since the copy engine writes out the page which contains
+     * the preemption variable when it is disabled, we need to explicitly
+     * enable it here */
     if (did_hibernate)
         enable_preemption();
+#endif
 
     kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
 #endif
+
+#if CONFIG_SLEEP
+    /* Because we don't save the bootstrap page, and we share it
+     * between sleep and mp slave init, we need to recreate it
+     * after coming back from sleep or hibernate */
+    install_real_mode_bootstrap(slave_pstart);
+#endif
+}
+
+extern char real_mode_bootstrap_end[];
+extern char real_mode_bootstrap_base[];
+
+void
+install_real_mode_bootstrap(void *prot_entry)
+{
+    /*
+     * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
+     * This is in page 1 which has been reserved for this purpose by
+     * machine_startup() from the boot processor.
+     * The slave boot code is responsible for switching to protected
+     * mode and then jumping to the common startup, _start().
+ */ + bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base), + (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET, + real_mode_bootstrap_end-real_mode_bootstrap_base); + + /* + * Set the location at the base of the stack to point to the + * common startup entry. + */ + ml_phys_write_word( + PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, + (unsigned int)kvtophys((vm_offset_t)prot_entry)); + + /* Flush caches */ + __asm__("wbinvd"); } +