X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..7ddcb079202367355dddccdfa4318e57d50318be:/osfmk/i386/acpi.c?ds=inline diff --git a/osfmk/i386/acpi.c b/osfmk/i386/acpi.c index 006fe9d9b..f13561244 100644 --- a/osfmk/i386/acpi.c +++ b/osfmk/i386/acpi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -26,116 +26,359 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include -#include #include +#include +#include +#include +#include +#include +#if CONFIG_MTRR #include +#endif +#if CONFIG_VMX +#include +#endif +#include #include +#include +#include #include +#include +#include +#if CONFIG_MCA +#include +#endif +#include + +#include #include +#include +#include +#include +#if HIBERNATION #include +#endif #include -extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon); -extern char acpi_wake_start[]; -extern char acpi_wake_end[]; +#include -extern int serial_init(void); -extern unsigned int disableSerialOuput; +#if CONFIG_SLEEP +extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon); +extern void acpi_wake_prot(void); +#endif -extern void set_kbd_leds(int leds); +extern void fpinit(void); vm_offset_t acpi_install_wake_handler(void) { - /* copy wake code to ACPI_WAKE_ADDR in low memory */ - bcopy_phys((addr64_t) kvtophys((vm_offset_t)acpi_wake_start), - (addr64_t) ACPI_WAKE_ADDR, - acpi_wake_end - acpi_wake_start); +#if CONFIG_SLEEP + install_real_mode_bootstrap(acpi_wake_prot); + return REAL_MODE_BOOTSTRAP_OFFSET; +#else + return 0; +#endif +} - /* flush cache */ - wbinvd(); +#if HIBERNATION +struct acpi_hibernate_callback_data { + acpi_sleep_callback func; + void *refcon; +}; +typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t; - /* return physical address of the wakeup code */ - return ACPI_WAKE_ADDR; -} +unsigned int save_kdebug_enable = 0; +static uint64_t acpi_sleep_abstime; -typedef struct acpi_hibernate_callback_data { - acpi_sleep_callback func; - void *refcon; -} acpi_hibernate_callback_data; +#if CONFIG_SLEEP static void acpi_hibernate(void *refcon) { - boolean_t hib; + uint32_t mode; + + acpi_hibernate_callback_data_t *data = + (acpi_hibernate_callback_data_t *)refcon; + + if (current_cpu_datap()->cpu_hibernate) + { +#if defined(__i386__) + cpu_IA32e_enable(current_cpu_datap()); +#endif + mode = hibernate_write_image(); - acpi_hibernate_callback_data *data = (acpi_hibernate_callback_data *)refcon; + if( mode == kIOHibernatePostWriteHalt ) + { + // off + HIBLOG("power off\n"); + if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU); + } + else if( mode == kIOHibernatePostWriteRestart ) + { + // restart + HIBLOG("restart\n"); + if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU); + } + else + { + // sleep + HIBLOG("sleep\n"); + + // should we come back via regular wake, set the state in memory. + cpu_datap(0)->cpu_hibernate = 0; + } - if (current_cpu_datap()->cpu_hibernate) { - hib = hibernate_write_image(); - } +#if defined(__i386__) + /* + * If we're in 64-bit mode, drop back into legacy mode during sleep. + */ + cpu_IA32e_disable(current_cpu_datap()); +#endif + } + kdebug_enable = 0; - (data->func)(data->refcon); + acpi_sleep_abstime = mach_absolute_time(); - /* should never get here! */ + (data->func)(data->refcon); + + /* should never get here! 
*/ } +#endif /* CONFIG_SLEEP */ +#endif /* HIBERNATION */ + +extern void slave_pstart(void); + void acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) { - acpi_hibernate_callback_data data; - boolean_t did_hibernate; +#if HIBERNATION + acpi_hibernate_callback_data_t data; +#endif + boolean_t did_hibernate; + unsigned int cpu; + kern_return_t rc; + unsigned int my_cpu; + uint64_t now; + uint64_t my_tsc; + uint64_t my_abs; + + kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", + current_cpu_datap()->cpu_hibernate, cpu_number()); - /* shutdown local APIC before passing control to BIOS */ + /* Get all CPUs to be in the "off" state */ + my_cpu = cpu_number(); + for (cpu = 0; cpu < real_ncpus; cpu += 1) { + if (cpu == my_cpu) + continue; + rc = pmCPUExitHaltToOff(cpu); + if (rc != KERN_SUCCESS) + panic("Error %d trying to transition CPU %d to OFF", + rc, cpu); + } + + /* shutdown local APIC before passing control to firmware */ lapic_shutdown(); - data.func = func; - data.refcon = refcon; +#if HIBERNATION + data.func = func; + data.refcon = refcon; +#endif + + /* Save power management timer state */ + pmTimerSave(); + +#if CONFIG_VMX + /* + * Turn off VT, otherwise switching to legacy mode will fail + */ + vmx_suspend(); +#endif +#if defined(__i386__) + /* + * If we're in 64-bit mode, drop back into legacy mode during sleep. + */ + cpu_IA32e_disable(current_cpu_datap()); +#endif + /* + * Enable FPU/SIMD unit for potential hibernate acceleration + */ + clear_ts(); + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0); + + save_kdebug_enable = kdebug_enable; + kdebug_enable = 0; + + acpi_sleep_abstime = mach_absolute_time(); + +#if CONFIG_SLEEP /* * Save master CPU state and sleep platform. * Will not return until platform is woken up, * or if sleep failed. */ - acpi_sleep_cpu(acpi_hibernate, &data); +#ifdef __x86_64__ + uint64_t old_cr3 = x86_64_pre_sleep(); +#endif +#if HIBERNATION + acpi_sleep_cpu(acpi_hibernate, &data); +#else + acpi_sleep_cpu(func, refcon); +#endif + +#ifdef __x86_64__ + x86_64_post_sleep(old_cr3); +#endif + +#endif /* CONFIG_SLEEP */ + + /* Reset UART if kprintf is enabled. + * However kprintf should not be used before rtc_sleep_wakeup() + * for compatibility with firewire kprintf. + */ + + if (FALSE == disable_serial_output) + pal_serial_init(); - /* reset UART if kprintf is enabled */ - if (FALSE == disableSerialOuput) - serial_init(); +#if HIBERNATION + if (current_cpu_datap()->cpu_hibernate) { +#if defined(__i386__) + int i; + for (i = 0; i < PMAP_NWINDOWS; i++) + *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0; +#endif + did_hibernate = TRUE; - if (current_cpu_datap()->cpu_hibernate) { - * (int *) CM1 = 0; - * (int *) CM2 = 0; - * (int *) CM3 = 0; + } else +#endif + { + did_hibernate = FALSE; + } - current_cpu_datap()->cpu_hibernate = 0; + /* Re-enable mode (including 64-bit if applicable) */ + cpu_mode_init(current_cpu_datap()); - did_hibernate = TRUE; - } else { - did_hibernate = FALSE; - } +#if CONFIG_MCA + /* Re-enable machine check handling */ + mca_cpu_init(); +#endif +#if CONFIG_MTRR /* restore MTRR settings */ mtrr_update_cpu(); +#endif + + /* update CPU microcode */ + ucode_update_wake(); + +#if CONFIG_VMX + /* + * Restore VT mode + */ + vmx_resume(); +#endif +#if CONFIG_MTRR /* set up PAT following boot processor power up */ pat_init(); +#endif + + /* + * Go through all of the CPUs and mark them as requiring + * a full restart. 
+	 */
+	pmMarkAllCPUsOff();
 
-	if (did_hibernate) {
-		hibernate_machine_init();
-	}
-
-	/* re-enable and re-init local apic */
+	ml_get_timebase(&now);
+
+	/* re-enable and re-init local apic (prior to starting timers) */
 	if (lapic_probe())
-		lapic_init();
+		lapic_configure();
 
 	/* let the realtime clock reset */
-	rtc_sleep_wakeup();
+	rtc_sleep_wakeup(acpi_sleep_abstime);
+
+	kdebug_enable = save_kdebug_enable;
+
+	if (did_hibernate) {
+
+		my_tsc = (now >> 32) | (now << 32);
+		my_abs = tmrCvt(my_tsc, tscFCvtt2n);
+
+		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START,
+			(uint32_t)(my_abs >> 32), (uint32_t)my_abs, 0, 0, 0);
+		hibernate_machine_init();
+		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+		current_cpu_datap()->cpu_hibernate = 0;
+
+		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+	} else
+		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+	/* Restore power management register state */
+	pmCPUMarkRunning(current_cpu_datap());
+
+	/* Restore power management timer state */
+	pmTimerRestore();
 
-	if (did_hibernate) {
-		enable_preemption();
-	}
+	/* Restart timer interrupts */
+	rtc_timer_start();
+
+	/* Reconfigure FP/SIMD unit */
+	init_fpu();
+
+#if HIBERNATION
+#ifdef __i386__
+	/* The image is written out using the copy engine, which disables
+	 * preemption. Since the copy engine writes out the page which contains
+	 * the preemption variable when it is disabled, we need to explicitly
+	 * enable it here */
+	if (did_hibernate)
+		enable_preemption();
+#endif
+
+	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
+#endif
+
+#if CONFIG_SLEEP
+	/* Because we don't save the bootstrap page, and we share it
+	 * between sleep and mp slave init, we need to recreate it
+	 * after coming back from sleep or hibernate */
+	install_real_mode_bootstrap(slave_pstart);
+#endif
 }
+
+extern char real_mode_bootstrap_end[];
+extern char real_mode_bootstrap_base[];
+
+void
+install_real_mode_bootstrap(void *prot_entry)
+{
+	/*
+	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
+	 * This is in page 1 which has been reserved for this purpose by
+	 * machine_startup() from the boot processor.
+	 * The slave boot code is responsible for switching to protected
+	 * mode and then jumping to the common startup, _start().
+	 */
+	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
+		   (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
+		   real_mode_bootstrap_end-real_mode_bootstrap_base);
+
+	/*
+	 * Set the location at the base of the stack to point to the
+	 * common startup entry.
+	 */
+	ml_phys_write_word(
+		PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET,
+		(unsigned int)kvtophys((vm_offset_t)prot_entry));
+
+	/* Flush caches */
+	__asm__("wbinvd");
+}
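
The new install_real_mode_bootstrap() above is the piece shared by wake-from-sleep and MP slave startup: it copies a real-mode trampoline into a reserved low-memory page and patches one word with the physical address of the protected-mode entry point. The stand-alone C sketch below models only that copy-and-patch idea in user space; BOOTSTRAP_AREA_SIZE, PROT_ENTRY_SLOT, the trampoline bytes, and install_bootstrap() are hypothetical stand-ins for REAL_MODE_BOOTSTRAP_OFFSET, PROT_MODE_START, and the kernel routine, not the kernel's actual layout.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins for the kernel's fixed low-memory layout. */
    #define BOOTSTRAP_AREA_SIZE 4096          /* size of the reserved page        */
    #define PROT_ENTRY_SLOT     8             /* offset of the patched entry word */

    static uint8_t low_memory[BOOTSTRAP_AREA_SIZE];  /* models the reserved page */

    /* Fake trampoline blob; in the kernel this is the code between
     * real_mode_bootstrap_base and real_mode_bootstrap_end. */
    static const uint8_t trampoline[] = { 0xfa, 0x31, 0xc0, 0x8e, 0xd8 };

    /* Model of install_real_mode_bootstrap(): copy the trampoline into the
     * reserved area, then patch one 32-bit slot with the protected-mode
     * entry point so the trampoline knows where to jump. */
    static void install_bootstrap(uint32_t prot_entry_phys)
    {
        memcpy(low_memory, trampoline, sizeof trampoline);
        memcpy(&low_memory[PROT_ENTRY_SLOT], &prot_entry_phys,
               sizeof prot_entry_phys);
        /* The kernel follows this with wbinvd so the copy reaches memory
         * before a CPU starts executing it with caches disabled. */
    }

    int main(void)
    {
        uint32_t patched;

        install_bootstrap(0x00200000u);       /* pretend entry-point address */
        memcpy(&patched, &low_memory[PROT_ENTRY_SLOT], sizeof patched);
        printf("patched entry point: 0x%08" PRIx32 "\n", patched);
        return 0;
    }

Because the same page serves both wake and slave start, acpi_sleep_kernel() reinstalls it with slave_pstart after every wake, as the comment in the diff notes.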
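
Further back in the diff, acpi_sleep_kernel() does not hand the caller's sleep callback straight to the platform when HIBERNATION is configured: it packs the callback and its refcon into an acpi_hibernate_callback_data_t and sleeps through acpi_hibernate(), which writes the hibernate image (and may halt or restart instead) before chaining to the original callback. The stand-alone C sketch below illustrates just that wrap-the-callback idiom; sleep_cpu(), hibernate_wrapper(), and platform_sleep() are hypothetical stand-ins for acpi_sleep_cpu(), acpi_hibernate(), and the platform callback, and no kernel APIs are used.

    #include <stdio.h>

    /* Stand-in for acpi_sleep_callback: a callback plus an opaque refcon. */
    typedef void (*sleep_callback_t)(void *refcon);

    /* Mirrors acpi_hibernate_callback_data_t: bundles the real callback with
     * its refcon so a wrapper can do extra work before invoking it. */
    struct hibernate_wrap_data {
        sleep_callback_t func;
        void            *refcon;
    };

    /* Stand-in for acpi_sleep_cpu(): save state, then run the callback. */
    static void sleep_cpu(sleep_callback_t cb, void *refcon)
    {
        /* ... processor state would be saved here ... */
        cb(refcon);
    }

    /* Stand-in for acpi_hibernate(): write the image, then chain to the
     * original callback packed into the wrapper data. */
    static void hibernate_wrapper(void *refcon)
    {
        struct hibernate_wrap_data *data = refcon;

        printf("writing hibernate image...\n");   /* hibernate_write_image() */
        (data->func)(data->refcon);               /* platform sleep callback */
    }

    /* Stand-in for the platform-supplied sleep callback. */
    static void platform_sleep(void *refcon)
    {
        printf("platform sleep, arg=%d\n", *(int *)refcon);
    }

    int main(void)
    {
        int arg = 42;
        struct hibernate_wrap_data data = { platform_sleep, &arg };

        /* HIBERNATION case: the wrapper runs first, the real callback second,
         * matching acpi_sleep_cpu(acpi_hibernate, &data) in the diff. */
        sleep_cpu(hibernate_wrapper, &data);

        /* Non-hibernation case: the callback is passed straight through,
         * matching the #else branch acpi_sleep_cpu(func, refcon). */
        sleep_cpu(platform_sleep, &arg);
        return 0;
    }

In the kernel, the call into the platform callback does not return on a successful sleep; execution resumes at the saved context on wake, which is why all of the post-sleep reinitialization in the diff (LAPIC, RTC, MTRR, PAT, FPU, timers) sits after acpi_sleep_cpu().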