diff --git a/osfmk/i386/i386_init.c b/osfmk/i386/i386_init.c
index 560a88ffc65d907e25b0715d435caa1075c1021a..2d8abbdb6f4e7de5a76a63c896fe213f10618fb2 100644
--- a/osfmk/i386/i386_init.c
+++ b/osfmk/i386/i386_init.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -54,8 +54,6 @@
  * the rights to redistribute these changes.
  */
 
-#include <platforms.h>
-#include <mach_kdb.h>
 
 #include <mach/i386/vm_param.h>
 
@@ -74,6 +72,7 @@
 #include <kern/xpr.h>
 #include <kern/cpu_data.h>
 #include <kern/processor.h>
+#include <sys/kdebug.h>
 #include <console/serial_protos.h>
 #include <vm/vm_page.h>
 #include <vm/pmap.h>
 #include <i386/pmCPU.h>
 #include <i386/tsc.h>
 #include <i386/locks.h> /* LcksOpts */
-#ifdef __i386__
-#include <i386/cpu_capabilities.h>
-#if    MACH_KDB
-#include <machine/db_machdep.h>
-#endif
-#endif
 #if DEBUG
 #include <machine/pal_routines.h>
 #endif
-
 #if DEBUG
 #define DBG(x...)       kprintf(x)
 #else
 #define DBG(x...)
 #endif
-#if    MACH_KDB
-#include <ddb/db_aout.h>
-#endif /* MACH_KDB */
 
 int                    debug_task;
 
@@ -128,15 +117,17 @@ extern const char version[];
 extern const char      version_variant[];
 extern int             nx_enabled;
 
-#ifdef __x86_64__
-extern void            *low_eintstack;
-#endif
+/*
+ * Set initial values so that ml_phys_* routines can use the booter's ID mapping
+ * to touch physical space before the kernel's physical aperture exists.
+ */
+uint64_t               physmap_base = 0;
+uint64_t               physmap_max = 4*GB;
 
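
With these defaults, a physical-to-virtual translation is a plain offset that
works in both regimes: before physmap_init() runs, physmap_base is 0 and the
arithmetic degenerates to the booter's identity mapping; afterwards it lands in
the randomized physical aperture. A minimal sketch of the idea (the helper name
is hypothetical; xnu's PHYSMAP_PTOV macro plays this role):

    static inline void *
    phys_to_virt_early(uint64_t paddr)
    {
            /* valid only for addresses the aperture covers */
            assert(paddr < physmap_max - physmap_base);
            return (void *)(uintptr_t)(physmap_base + paddr);
    }
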
-void                   *KPTphys;
+pd_entry_t             *KPTphys;
 pd_entry_t             *IdlePTD;
-#ifdef __i386__
-pd_entry_t             *IdlePDPT64;
-#endif
+pdpt_entry_t           *IdlePDPT;
+pml4_entry_t           *IdlePML4;
 
 char *physfree;
 
@@ -150,9 +141,7 @@ ALLOCPAGES(int npages)
        uintptr_t tmp = (uintptr_t)physfree;
        bzero(physfree, npages * PAGE_SIZE);
        physfree += npages * PAGE_SIZE;
-#ifdef __x86_64__
        tmp += VM_MIN_KERNEL_ADDRESS & ~LOW_4GB_MASK;
-#endif
        return (void *)tmp;
 }
 
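
ALLOCPAGES() carves pages off physfree and returns them through their high
kernel alias: the physical address stays in the low bits and the upper bits of
VM_MIN_KERNEL_ADDRESS are added on top. A worked example, assuming
VM_MIN_KERNEL_ADDRESS is 0xFFFFFF8000000000 (its value in this era of xnu):

    uintptr_t phys = 0x530000;    /* hypothetical value of physfree */
    uintptr_t virt = phys + (VM_MIN_KERNEL_ADDRESS & ~LOW_4GB_MASK);
    /* virt == 0xFFFFFF8000530000: same page, kernel-space alias */
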
@@ -169,7 +158,6 @@ fillkpt(pt_entry_t *base, int prot, uintptr_t src, int index, int count)
 
 extern pmap_paddr_t first_avail;
 
-#ifdef __x86_64__
 int break_kprintf = 0;
 
 uint64_t
@@ -188,20 +176,18 @@ x86_64_post_sleep(uint64_t new_cr3)
        set_cr3_raw((uint32_t) new_cr3);
 }
 
-#endif
 
-#ifdef __i386__
-#define ID_MAP_VTOP(x) x
-#endif
 
 
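
On x86_64, ID_MAP_VTOP() is the inverse trick: it masks a kernel virtual
address down to the low 4GiB to recover the physical address, which is valid
while the booter's identity mapping is live. Roughly (a sketch; see the pmap
headers for the real definition):

    #define ID_MAP_VTOP(x)  ((void *)(((uintptr_t)(x)) & LOW_4GB_MASK))
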
-#ifdef __x86_64__
 // Set up the physical mapping - NPHYSMAP GB of memory mapped at a high address
 // NPHYSMAP is determined by the maximum supported RAM size plus 4GB to
 // account for the PCI hole (which is less than 4GB but not more).
 
-// Compile-time guard:
-extern int maxphymapsupported[NPHYSMAP <= PTE_PER_PAGE ? 1 : -1];
+/* Compile-time guard: NPHYSMAP is capped to 256GiB, accounting for
+ * randomisation
+ */
+extern int maxphymapsupported[NPHYSMAP <= (PTE_PER_PAGE/2) ? 1 : -1];
+
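
The extern declaration above is the classic pre-C11 compile-time assertion: if
the condition is false the array size goes negative and compilation fails. The
cap of PTE_PER_PAGE/2 (256) leaves headroom for the randomisation below: an
8-bit slide of up to 255 L3 slots plus up to 256 mapped slots still fits in the
512 entries of one L3 page. The idiom in general form (macro name
hypothetical):

    #define COMPILE_TIME_ASSERT(cond) \
            extern int _ct_assert_[(cond) ? 1 : -1]

    COMPILE_TIME_ASSERT(NPHYSMAP <= (PTE_PER_PAGE / 2)); /* same guard */
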
 static void
 physmap_init(void)
 {
@@ -210,31 +196,78 @@ physmap_init(void)
                pt_entry_t entries[PTE_PER_PAGE];
        } * physmapL2 = ALLOCPAGES(NPHYSMAP);
 
-       uintptr_t i;
-       for(i=0;i<NPHYSMAP;i++) {
-               physmapL3[i] = ((uintptr_t)ID_MAP_VTOP(&physmapL2[i]))
+       uint64_t i;
+       uint8_t phys_random_L3 = early_random() & 0xFF;
+
+       /* We assume NX support. Mark all levels of the PHYSMAP NX
+        * to avoid granting executability via a single bit flip.
+        */
+#if DEVELOPMENT || DEBUG
+       uint32_t reg[4];
+       do_cpuid(0x80000000, reg);
+       if (reg[eax] >= 0x80000001) {
+               do_cpuid(0x80000001, reg);
+               assert(reg[edx] & CPUID_EXTFEATURE_XD);
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
+       for(i = 0; i < NPHYSMAP; i++) {
+               physmapL3[i + phys_random_L3] =
+                               ((uintptr_t)ID_MAP_VTOP(&physmapL2[i]))
                                | INTEL_PTE_VALID
+                               | INTEL_PTE_NX
                                | INTEL_PTE_WRITE;
-               uintptr_t j;
-               for(j=0;j<PTE_PER_PAGE;j++) {
-                       physmapL2[i].entries[j] = (((i*PTE_PER_PAGE+j)<<PDSHIFT)
+
+               uint64_t j;
+               for(j = 0; j < PTE_PER_PAGE; j++) {
+                       physmapL2[i].entries[j] =
+                           ((i * PTE_PER_PAGE + j) << PDSHIFT)
                                                        | INTEL_PTE_PS
                                                        | INTEL_PTE_VALID
-                                                       | INTEL_PTE_WRITE);
+                                                       | INTEL_PTE_NX
+                                                       | INTEL_PTE_WRITE;
                }
        }
 
-       IdlePML4[KERNEL_PHYSMAP_INDEX] = ((uintptr_t)ID_MAP_VTOP(physmapL3))
-                                               | INTEL_PTE_VALID
-                                               | INTEL_PTE_WRITE;
-       if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
-               IdlePML4[KERNEL_PHYSMAP_INDEX] |= INTEL_PTE_NX;
-       }
+       IdlePML4[KERNEL_PHYSMAP_PML4_INDEX] =
+                                       ((uintptr_t)ID_MAP_VTOP(physmapL3))
+                                       | INTEL_PTE_VALID
+                                       | INTEL_PTE_NX
+                                       | INTEL_PTE_WRITE;
+
+       physmap_base = KVADDR(KERNEL_PHYSMAP_PML4_INDEX, phys_random_L3, 0, 0);
+       physmap_max = physmap_base + NPHYSMAP * GB;
+       DBG("Physical address map base: 0x%qx\n", physmap_base);
+       DBG("Physical map idlepml4[%d]: 0x%llx\n",
+               KERNEL_PHYSMAP_PML4_INDEX, IdlePML4[KERNEL_PHYSMAP_PML4_INDEX]);
+}
 
-       DBG("physical map idlepml4[%d]: 0x%llx\n",
-               KERNEL_PHYSMAP_INDEX, IdlePML4[KERNEL_PHYSMAP_INDEX]);
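
Putting the numbers together: the aperture slides by phys_random_L3 slots of
1GiB each, and KVADDR() packs the chosen table indices back into a canonical
virtual address. A worked example, assuming early_random() yielded 0x42 in its
low byte:

    uint8_t  slide = 0x42;                 /* early_random() & 0xFF */
    uint64_t base  = KVADDR(KERNEL_PHYSMAP_PML4_INDEX, slide, 0, 0);
    uint64_t max   = base + NPHYSMAP * GB; /* 1GiB per L3 entry */
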
+static void
+descriptor_alias_init()
+{
+       vm_offset_t     master_gdt_phys;
+       vm_offset_t     master_gdt_alias_phys;
+       vm_offset_t     master_idt_phys;
+       vm_offset_t     master_idt_alias_phys;
+
+       assert(((vm_offset_t)master_gdt & PAGE_MASK) == 0);
+       assert(((vm_offset_t)master_idt64 & PAGE_MASK) == 0);
+
+       master_gdt_phys       = (vm_offset_t) ID_MAP_VTOP(master_gdt);
+       master_idt_phys       = (vm_offset_t) ID_MAP_VTOP(master_idt64);
+       master_gdt_alias_phys = (vm_offset_t) ID_MAP_VTOP(MASTER_GDT_ALIAS);
+       master_idt_alias_phys = (vm_offset_t) ID_MAP_VTOP(MASTER_IDT_ALIAS);
+       
+       DBG("master_gdt_phys:       %p\n", (void *) master_gdt_phys);
+       DBG("master_idt_phys:       %p\n", (void *) master_idt_phys);
+       DBG("master_gdt_alias_phys: %p\n", (void *) master_gdt_alias_phys);
+       DBG("master_idt_alias_phys: %p\n", (void *) master_idt_alias_phys);
+
+       KPTphys[atop_kernel(master_gdt_alias_phys)] = master_gdt_phys |
+               INTEL_PTE_VALID | INTEL_PTE_NX | INTEL_PTE_WRITE;
+       KPTphys[atop_kernel(master_idt_alias_phys)] = master_idt_phys |
+               INTEL_PTE_VALID | INTEL_PTE_NX; /* read-only */
 }
-#endif
 
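
The aliasing above is plain PTE surgery: an existing level-1 slot in KPTphys is
pointed at the physical page of the master descriptor table, so a fixed virtual
address aliases it. A generic sketch of the operation (hypothetical helper;
descriptor_alias_init() open-codes two instances, with the IDT alias left
read-only):

    static void
    alias_page(vm_offset_t alias_pa, vm_offset_t target_pa, pt_entry_t prot)
    {
            /* alias_pa must fall in the range KPTphys already covers */
            KPTphys[atop_kernel(alias_pa)] = target_pa
                    | INTEL_PTE_VALID
                    | INTEL_PTE_NX
                    | prot;    /* INTEL_PTE_WRITE, or 0 for read-only */
    }
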
 static void
 Idle_PTs_init(void)
@@ -242,38 +275,41 @@ Idle_PTs_init(void)
        /* Allocate the "idle" kernel page tables: */
        KPTphys  = ALLOCPAGES(NKPT);            /* level 1 */
        IdlePTD  = ALLOCPAGES(NPGPTD);          /* level 2 */
+       IdlePDPT = ALLOCPAGES(1);               /* level 3 */
+       IdlePML4 = ALLOCPAGES(1);               /* level 4 */
 
-#ifdef __x86_64__
-       physmap_init();
-#else
-       IdlePDPT64 = ALLOCPAGES(1);
-
-       // Recursive mapping of PTEs
-       fillkpt(IdlePTD, INTEL_PTE_WRITE, (uintptr_t)IdlePTD, PTDPTDI, NPGPTD);
-       // commpage
-       fillkpt(IdlePTD, INTEL_PTE_WRITE|INTEL_PTE_USER, (uintptr_t)ALLOCPAGES(1), _COMM_PAGE32_BASE_ADDRESS >> PDESHIFT,1);
-#endif
        // Fill the lowest level with everything up to physfree
        fillkpt(KPTphys,
-                       INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));
+               INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));
 
-       // Rewrite the 2nd-lowest level  to point to pages of KPTphys.
-       // This was previously filled statically by idle_pt.c, and thus
-       // must be done after the KPTphys fill since IdlePTD is in use
+       /* IdlePTD */
        fillkpt(IdlePTD,
-                       INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);
+               INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);
 
        // IdlePDPT entries
-#ifdef __i386__
-       fillkpt(IdlePDPT, 0, (uintptr_t)IdlePTD, 0, NPGPTD);
-#else
-       fillkpt(IdlePDPT, INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);
-#endif
+       fillkpt(IdlePDPT,
+               INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);
+
+       // IdlePML4 single entry for kernel space.
+       fillkpt(IdlePML4 + KERNEL_PML4_INDEX,
+               INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1);
+       
+       postcode(VSTART_PHYSMAP_INIT);
+
+       physmap_init();
+
+       postcode(VSTART_DESC_ALIAS_INIT);
+
+       descriptor_alias_init();
+
+       postcode(VSTART_SET_CR3);
+
+       // Switch to the page tables..
+       set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
 
-       // Flush the TLB now we're done rewriting the page tables..
-       set_cr3_raw(get_cr3_raw());
 }
 
+
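
Note the ordering above: physmap_init() and descriptor_alias_init() must
complete while the booter's identity mapping is still live, because
set_cr3_raw() takes the physical address of a fully populated PML4 and switches
to it in one step. After the switch only two PML4 slots are valid; a sanity
check one could add:

    assert(IdlePML4[KERNEL_PML4_INDEX]         & INTEL_PTE_VALID);
    assert(IdlePML4[KERNEL_PHYSMAP_PML4_INDEX] & INTEL_PTE_VALID);
    assert(IdlePML4[0] == 0);  /* no identity map: low VAs now fault */
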
 /*
  * vstart() is called in the natural mode (64bit for K64, 32 for K32)
  * on a set of bootstrap pagetables which use large, 2MB pages to map 
@@ -289,12 +325,13 @@ Idle_PTs_init(void)
  * Non-bootstrap processors are called with argument boot_args_start NULL.
  * These processors switch immediately to the existing kernel page tables.
  */
+__attribute__((noreturn))
 void
 vstart(vm_offset_t boot_args_start)
 {
        boolean_t       is_boot_cpu = !(boot_args_start == 0);
        int             cpu;
-       uint32_t lphysfree;
+       uint32_t        lphysfree;
 
        postcode(VSTART_ENTRY);
 
@@ -305,7 +342,8 @@ vstart(vm_offset_t boot_args_start)
                kernelBootArgs = (boot_args *)boot_args_start;
                lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize;
                physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
-#if DEBUG
+
+#if DEVELOPMENT || DEBUG
                pal_serial_init();
 #endif
                DBG("revision      0x%x\n", kernelBootArgs->Revision);
@@ -320,98 +358,55 @@ vstart(vm_offset_t boot_args_start)
                        kernelBootArgs, 
                        &kernelBootArgs->ksize,
                        &kernelBootArgs->kaddr);
-#ifdef __x86_64__
-               /* enable NX/XD, boot processor */
-               if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
-                       wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_NXE);
-                       DBG("vstart() NX/XD enabled\n");
-               }
-#endif
-               postcode(PSTART_PAGE_TABLES);
-
-               Idle_PTs_init();
-
-               first_avail = (vm_offset_t)ID_MAP_VTOP(physfree);
-
-               cpu = 0;
-               cpu_data_alloc(TRUE);
+               DBG("SMBIOS mem sz 0x%llx\n", kernelBootArgs->PhysicalMemorySize);
 
-                               
                /*
                 * Setup boot args given the physical start address.
+                * Note: PE_init_platform needs to be called before Idle_PTs_init
+                * because access to the DeviceTree is required to read the
+                * random seed before generating a random physical map slide.
                 */
                kernelBootArgs = (boot_args *)
                    ml_static_ptovirt(boot_args_start);
                DBG("i386_init(0x%lx) kernelBootArgs=%p\n",
                    (unsigned long)boot_args_start, kernelBootArgs);
-
                PE_init_platform(FALSE, kernelBootArgs);
                postcode(PE_INIT_PLATFORM_D);
+
+               Idle_PTs_init();
+               postcode(VSTART_IDLE_PTS_INIT);
+
+               first_avail = (vm_offset_t)ID_MAP_VTOP(physfree);
+
+               cpu = 0;
+               cpu_data_alloc(TRUE);
        } else {
+               /* Switch to kernel's page tables (from the Boot PTs) */
+               set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
                /* Find our logical cpu number */
                cpu = lapic_to_cpu[(LAPIC_READ(ID)>>LAPIC_ID_SHIFT) & LAPIC_ID_MASK];
                DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, rdmsr64(MSR_IA32_GS_BASE));
-#ifdef __x86_64__
-               if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
-                       wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_NXE);
-                       DBG("vstart() NX/XD enabled, non-boot\n");
-               }
-#endif
        }
 
-#ifdef __x86_64__
+       postcode(VSTART_CPU_DESC_INIT);
        if(is_boot_cpu)
                cpu_desc_init64(cpu_datap(cpu));
        cpu_desc_load64(cpu_datap(cpu));
-#else
-       if(is_boot_cpu)
-               cpu_desc_init(cpu_datap(cpu));
-       cpu_desc_load(cpu_datap(cpu));
-#endif
+       postcode(VSTART_CPU_MODE_INIT);
        if (is_boot_cpu)
                cpu_mode_init(current_cpu_datap()); /* cpu_mode_init() will be
                                                     * invoked on the APs
                                                     * via i386_init_slave()
                                                     */
-#ifdef __x86_64__
-       /* Done with identity mapping */
-       IdlePML4[0] = 0;
-#endif
-
        postcode(VSTART_EXIT);
-#ifdef __i386__
-       if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
-               wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_NXE);
-               DBG("vstart() NX/XD enabled, i386\n");
-       }
+       x86_init_wrapper(is_boot_cpu ? (uintptr_t) i386_init
+                                    : (uintptr_t) i386_init_slave,
+                        cpu_datap(cpu)->cpu_int_stack_top);
+}
 
-       if (is_boot_cpu)
-               i386_init();
-       else
-               i386_init_slave();
-       /*NOTREACHED*/
-#else
-       /* We need to switch to a new per-cpu stack, but we must do this atomically with
-        * the call to ensure the compiler doesn't assume anything about the stack before
-        * e.g. tail-call optimisations
-        */
-       if (is_boot_cpu)
-       {
-               asm volatile(
-                               "mov %1, %%rdi;"
-                               "mov %0, %%rsp;"
-                               "call _i386_init;"      : : "r" 
-                               (cpu_datap(cpu)->cpu_int_stack_top), "r" (boot_args_start));
-       }
-       else
-       {
-               asm volatile(
-                               "mov %0, %%rsp;"
-                               "call _i386_init_slave;"        : : "r" 
-                               (cpu_datap(cpu)->cpu_int_stack_top));
-       }
-       /*NOTREACHED*/
-#endif
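
x86_init_wrapper() takes over the job of the inline asm it replaces: the switch
to the per-cpu interrupt stack and the call into the init function must be a
single sequence the compiler cannot reorder or spill across, which is why it
lives in assembly. A sketch of its shape (illustrative only, not xnu's exact
code):

    /* x86_init_wrapper(func in %rdi, new stack top in %rsi):  */
    /*         movq  %rsi, %rsp      # switch to the new stack */
    /*         xorq  %rbp, %rbp      # terminate backtraces    */
    /*         callq *%rdi           # enter func; no return   */
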
+void
+pstate_trace(void)
+{
 }
 
 /*
@@ -430,15 +425,17 @@ i386_init(void)
        postcode(I386_INIT_ENTRY);
 
        pal_i386_init();
+       tsc_init();
+       rtclock_early_init();   /* mach_absolute_time() now functional */
+
+       kernel_debug_string_early("i386_init");
+       pstate_trace();
 
 #if CONFIG_MCA
        /* Initialize machine-check handling */
        mca_cpu_init();
 #endif
 
-
-       kernel_early_bootstrap();
-
        master_cpu = 0;
        cpu_init();
 
@@ -448,8 +445,12 @@ i386_init(void)
        panic_init();                   /* Init this in case we need debugger */
 
        /* setup debugging output if one has been chosen */
+       kernel_debug_string_early("PE_init_kprintf");
        PE_init_kprintf(FALSE);
 
+       kernel_debug_string_early("kernel_early_bootstrap");
+       kernel_early_bootstrap();
+
        if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof (dgWork.dgFlags)))
                dgWork.dgFlags = 0;
 
@@ -464,6 +465,7 @@ i386_init(void)
        }
 
        /* setup console output */
+       kernel_debug_string_early("PE_init_printf");
        PE_init_printf(FALSE);
 
        kprintf("version_variant = %s\n", version_variant);
@@ -482,8 +484,9 @@ i386_init(void)
        /*
         * debug support for > 4G systems
         */
-       if (!PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof (vm_himemory_mode)))
-               vm_himemory_mode = 0;
+       PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof (vm_himemory_mode));
+       if (vm_himemory_mode != 0)
+               kprintf("himemory_mode: %d\n", vm_himemory_mode);
 
        if (!PE_parse_boot_argn("immediate_NMI", &fidn, sizeof (fidn)))
                force_immediate_debugger_NMI = FALSE;
@@ -497,31 +500,14 @@ i386_init(void)
            &urgency_notification_assert_abstime_threshold,
            sizeof(urgency_notification_assert_abstime_threshold));
 
-#if CONFIG_YONAH
-       /*
-        * At this point we check whether we are a 64-bit processor
-        * and that we're not restricted to legacy mode, 32-bit operation.
-        */
-       if (cpuid_extfeatures() & CPUID_EXTFEATURE_EM64T) {
-               boolean_t       legacy_mode;
-               kprintf("EM64T supported");
-               if (PE_parse_boot_argn("-legacy", &legacy_mode, sizeof (legacy_mode))) {
-                       kprintf(" but legacy mode forced\n");
-                       IA32e = FALSE;
-               } else {
-                       kprintf(" and will be enabled\n");
-               }
-       } else
-               IA32e = FALSE;
-#endif
-
        if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
                nx_enabled = 0;
 
        /*   
         * VM initialization, after this we're using page tables...
         * The maximum number of cpus must be set beforehand.
         */
+       kernel_debug_string_early("i386_vm_init");
        i386_vm_init(maxmemtouse, IA32e, kernelBootArgs);
 
        /* create the console for verbose or pretty mode */
@@ -529,13 +515,15 @@ i386_init(void)
        PE_init_platform(TRUE, kernelBootArgs);
        PE_create_console();
 
-       tsc_init();
+       kernel_debug_string_early("power_management_init");
        power_management_init();
-
        processor_bootstrap();
        thread_bootstrap();
 
+       pstate_trace();
+       kernel_debug_string_early("machine_startup");
        machine_startup();
+       pstate_trace();
 }
 
 static void
@@ -555,11 +543,13 @@ do_init_slave(boolean_t fast_restart)
                assert(!ml_get_interrupts_enabled());
   
                cpu_mode_init(current_cpu_datap());
+               pmap_cpu_init();
   
 #if CONFIG_MCA
                mca_cpu_init();
 #endif
   
+               LAPIC_INIT();
                lapic_configure();
                LAPIC_DUMP();
                LAPIC_CPU_MAP_DUMP();
@@ -569,15 +559,14 @@ do_init_slave(boolean_t fast_restart)
 #if CONFIG_MTRR
                mtrr_update_cpu();
 #endif
+               /* update CPU microcode */
+               ucode_update_wake();
        } else
            init_param = FAST_SLAVE_INIT;
 
-       /* update CPU microcode */
-       ucode_update_wake();
-
 #if CONFIG_VMX
        /* resume VT operation */
-       vmx_resume();
+       vmx_resume(FALSE);
 #endif
 
 #if CONFIG_MTRR
@@ -587,16 +576,7 @@ do_init_slave(boolean_t fast_restart)
 
        cpu_thread_init();      /* not strictly necessary */
 
-#ifdef __x86_64__
-       /* Re-zero the identity-map for the idle PT's. This MUST be done before 
-        * cpu_running is set so that other slaves can set up their own
-        * identity-map */
-       if (!fast_restart)
-           IdlePML4[0] = 0;
-#endif
-
-       cpu_init();     /* Sets cpu_running which starter cpu waits for */ 
-
+       cpu_init();     /* Sets cpu_running which starter cpu waits for */
        slave_main(init_param);
   
        panic("do_init_slave() returned from slave_main()");