diff --git a/osfmk/i386/acpi.c b/osfmk/i386/acpi.c
index 539a82fde9ad53133d19ec6bd0bbb138c66f507c..b27d050b2b45f8a75e85188f18a0f887df004a7a 100644
--- a/osfmk/i386/acpi.c
+++ b/osfmk/i386/acpi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
+#include <i386/pmap.h>
+#include <i386/proc_reg.h>
+#include <i386/mp_desc.h>
 #include <i386/misc_protos.h>
+#include <i386/mp.h>
 #include <i386/cpu_data.h>
-#include <i386/proc_reg.h>
-#include <i386/pmap.h>
 #include <i386/mtrr.h>
+#if CONFIG_VMX
 #include <i386/vmx/vmx_cpu.h>
+#endif
 #include <i386/acpi.h>
 #include <i386/fpu.h>
+#include <i386/lapic.h>
 #include <i386/mp.h>
 #include <i386/mp_desc.h>
 #include <i386/serial_io.h>
-#include <i386/hpet.h>
+#if CONFIG_MCA
 #include <i386/machine_check.h>
+#endif
+#include <i386/pmCPU.h>
 
 #include <kern/cpu_data.h>
 #include <console/serial_protos.h>
 #endif
 #include <IOKit/IOPlatformExpert.h>
 
+#if CONFIG_SLEEP
 extern void    acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
-extern char acpi_wake_start[];
-extern char    acpi_wake_end[];
-
-extern void        set_kbd_leds(int leds);
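+/* Protected-mode entry point that the real-mode wake bootstrap jumps to on resume */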
+extern void acpi_wake_prot(void);
+#endif
 
 extern void    fpinit(void);
 
 vm_offset_t
 acpi_install_wake_handler(void)
 {
-       /* copy wake code to ACPI_WAKE_ADDR in low memory */
-       bcopy_phys(kvtophys((vm_offset_t)acpi_wake_start),
-                  (addr64_t) ACPI_WAKE_ADDR,
-                  acpi_wake_end - acpi_wake_start);
-
-       /* flush cache */
-       wbinvd();
-
-       /* return physical address of the wakeup code */
-       return ACPI_WAKE_ADDR;
+#if CONFIG_SLEEP
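+       /* Install the wake bootstrap (entering acpi_wake_prot) and return its physical address */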
+       install_real_mode_bootstrap(acpi_wake_prot);
+       return REAL_MODE_BOOTSTRAP_OFFSET;
+#else
+       return 0;
+#endif
 }
 
 #if HIBERNATION
@@ -78,6 +80,7 @@ struct acpi_hibernate_callback_data {
 };
 typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
 
+#if CONFIG_SLEEP
 static void
 acpi_hibernate(void *refcon)
 {
@@ -88,7 +91,9 @@ acpi_hibernate(void *refcon)
 
        if (current_cpu_datap()->cpu_hibernate) 
        {
+#if defined(__i386__)
                cpu_IA32e_enable(current_cpu_datap());
+#endif
 
                mode = hibernate_write_image();
 
@@ -113,32 +118,49 @@ acpi_hibernate(void *refcon)
                        cpu_datap(0)->cpu_hibernate = 0;                        
                }
 
+#if defined(__i386__)
                /*
                 * If we're in 64-bit mode, drop back into legacy mode during sleep.
                 */
                cpu_IA32e_disable(current_cpu_datap());
-
+#endif
        }
 
        (data->func)(data->refcon);
 
        /* should never get here! */
 }
-#endif
+#endif /* CONFIG_SLEEP */
+#endif /* HIBERNATION */
 
 static uint64_t                acpi_sleep_abstime;
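+/* Entry point used to restart slave (non-boot) CPUs; see install_real_mode_bootstrap(slave_pstart) below */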
+extern void                    slave_pstart(void);
 
 void
 acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
 {
 #if HIBERNATION
        acpi_hibernate_callback_data_t data;
-       boolean_t did_hibernate;
 #endif
+       boolean_t did_hibernate;
+       unsigned int    cpu;
+       kern_return_t   rc;
+       unsigned int    my_cpu;
 
        kprintf("acpi_sleep_kernel hib=%d\n",
                        current_cpu_datap()->cpu_hibernate);
 
+       /* Get all CPUs to be in the "off" state */
+       my_cpu = cpu_number();
+       for (cpu = 0; cpu < real_ncpus; cpu += 1) {
+               if (cpu == my_cpu)
+                       continue;
+               rc = pmCPUExitHaltToOff(cpu);
+               if (rc != KERN_SUCCESS)
+                       panic("Error %d trying to transition CPU %d to OFF",
+                             rc, cpu);
+       }
+
        /* shutdown local APIC before passing control to BIOS */
        lapic_shutdown();
 
@@ -147,42 +169,60 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
        data.refcon = refcon;
 #endif
 
-       /* Save HPET state */
-       hpet_save();
+       /* Save power management timer state */
+       pmTimerSave();
 
+#if CONFIG_VMX
        /* 
         * Turn off VT, otherwise switching to legacy mode will fail
         */
        vmx_suspend();
+#endif
 
+#if defined(__i386__)
        /*
         * If we're in 64-bit mode, drop back into legacy mode during sleep.
         */
        cpu_IA32e_disable(current_cpu_datap());
+#endif
 
        acpi_sleep_abstime = mach_absolute_time();
 
+#if CONFIG_SLEEP
        /*
         * Save master CPU state and sleep platform.
         * Will not return until platform is woken up,
         * or if sleep failed.
         */
+#ifdef __x86_64__
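+       /* Save the current CR3 so x86_64_post_sleep() can restore it after wake */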
+       uint64_t old_cr3 = x86_64_pre_sleep();
+#endif
 #if HIBERNATION
        acpi_sleep_cpu(acpi_hibernate, &data);
 #else
        acpi_sleep_cpu(func, refcon);
 #endif
+#ifdef __x86_64__
+       x86_64_post_sleep(old_cr3);
+#endif
+
+#endif /* CONFIG_SLEEP */
+
+       /* Reset the UART if kprintf is enabled.
+        * However, kprintf should not be used before rtc_sleep_wakeup()
+        * for compatibility with FireWire kprintf.
+        */
 
-       /* reset UART if kprintf is enabled */
        if (FALSE == disable_serial_output)
                serial_init();
 
 #if HIBERNATION
        if (current_cpu_datap()->cpu_hibernate) {
+#if defined(__i386__)
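+               /* i386: clear the per-cpu pmap mapping-window CMAP entries after hibernation */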
                int i;
                for (i = 0; i < PMAP_NWINDOWS; i++)
                        *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
-               current_cpu_datap()->cpu_hibernate = 0;
+#endif
                did_hibernate = TRUE;
 
        } else
@@ -194,32 +234,46 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
        /* Re-enable mode (including 64-bit if applicable) */
        cpu_mode_init(current_cpu_datap());
 
+#if CONFIG_MCA
        /* Re-enable machine check handling */
        mca_cpu_init();
+#endif
 
        /* restore MTRR settings */
        mtrr_update_cpu();
 
+#if CONFIG_VMX
        /* 
         * Restore VT mode
         */
        vmx_resume();
+#endif
 
        /* set up PAT following boot processor power up */
        pat_init();
 
+       /*
+        * Go through all of the CPUs and mark them as requiring
+        * a full restart.
+        */
+       pmMarkAllCPUsOff();
+
        /* let the realtime clock reset */
        rtc_sleep_wakeup(acpi_sleep_abstime);
 
-       if (did_hibernate)
+       if (did_hibernate) {
                hibernate_machine_init();
-
+               current_cpu_datap()->cpu_hibernate = 0;
+       }
        /* re-enable and re-init local apic */
        if (lapic_probe())
-               lapic_init();
+               lapic_configure();
+
+       /* Restore power management register state */
+       pmCPUMarkRunning(current_cpu_datap());
 
-       /* Restore HPET state */
-       hpet_restore();
+       /* Restore power management timer state */
+       pmTimerRestore();
 
        /* Restart tick interrupts from the LAPIC timer */
        rtc_lapic_start_ticking();
@@ -228,9 +282,52 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
        clear_fpu();
 
 #if HIBERNATION
+#ifdef __i386__
+       /* The image is written out using the copy engine, which disables
+        * preemption. Since the copy engine writes out the page that contains
+        * the preemption variable while it is disabled, we need to explicitly
+        * re-enable preemption here. */
        if (did_hibernate)
                enable_preemption();
+#endif
 
        kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
 #endif
+
+#if CONFIG_SLEEP
+       /* Because we don't save the bootstrap page, and we share it
+        * between sleep and mp slave init, we need to recreate it
+        * after coming back from sleep or hibernate. */
+       install_real_mode_bootstrap(slave_pstart);
+#endif
+}
+
+extern char real_mode_bootstrap_end[];
+extern char real_mode_bootstrap_base[];
+
+void
+install_real_mode_bootstrap(void *prot_entry)
+{
+       /*
+        * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
+        * This is in page 1 which has been reserved for this purpose by
+        * machine_startup() from the boot processor.
+        * The slave boot code is responsible for switching to protected
+        * mode and then jumping to the common startup, _start().
+        */
+       bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
+                  (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
+                  real_mode_bootstrap_end-real_mode_bootstrap_base);
+
+       /*
+        * Set the location at the base of the stack to point to the
+        * common startup entry.
+        */
+       ml_phys_write_word(
+               PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET,
+               (unsigned int)kvtophys((vm_offset_t)prot_entry));
+       
+       /* Flush caches */
+       __asm__("wbinvd");
 }
+