- if (is_boot_cpu) {
- /*
- * Master CPU uses the tables built at boot time.
- * Just set the TSS and GDT pointers.
- */
- cdt->cdi_ktss = &ktss;
-#if MACH_KDB
- cdt->cdi_dbtss = &dbtss;
-#endif /* MACH_KDB */
- cdt->cdi_gdt = gdt;
- cdt->cdi_idt = idt;
- cdt->cdi_ldt = ldt;
-
- } else {
-
- cdt->cdi_ktss = &mpt->ktss;
- cdt->cdi_gdt = mpt->gdt;
- cdt->cdi_idt = mpt->idt;
- cdt->cdi_ldt = mpt->ldt;
-
- /*
- * Copy the tables
- */
- bcopy((char *)idt,
- (char *)mpt->idt,
- sizeof(idt));
- bcopy((char *)gdt,
- (char *)mpt->gdt,
- sizeof(gdt));
- bcopy((char *)ldt,
- (char *)mpt->ldt,
- sizeof(ldt));
- bzero((char *)&mpt->ktss,
- sizeof(struct i386_tss));
-
-#if MACH_KDB
- cdt->cdi_dbtss = &dbtss;
- bcopy((char *)&dbtss,
- (char *)&mpt->dbtss,
- sizeof(struct i386_tss));
-#endif /* MACH_KDB */
-
- /*
- * Fix up the entries in the GDT to point to
- * this LDT and this TSS.
- */
- mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
- mpt->gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) mpt->ldt;
- fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);
-
- mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
- mpt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) &mpt->ktss;
- fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);
-
- mpt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
- mpt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
- fix_desc(&mpt->gdt[sel_idx(CPU_DATA_GS)], 1);
-
-#if MACH_KDB
- mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
- mpt->gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) &mpt->dbtss;
- fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);
-
- mpt->dbtss.esp0 = (int)(db_task_stack_store +
- (INTSTACK_SIZE * (cpu + 1)) - sizeof (natural_t));
- mpt->dbtss.esp = mpt->dbtss.esp0;
- mpt->dbtss.eip = (int)&db_task_start;
-#endif /* MACH_KDB */
-
- mpt->ktss.ss0 = KERNEL_DS;
- mpt->ktss.io_bit_map_offset = 0x0FFF; /* no IO bitmap */
+ /*
+ * Now copy back over the fake structure.
+ */
+ bcopy((void *) &real, (void *) fakep, sizeof(real));
+ }
+}
+
+static void
+cpu_gdt_alias(vm_map_offset_t gdt, vm_map_offset_t alias)
+{
+ pt_entry_t *pte = NULL;
+
+ /* Require page alignment */
+ assert(page_aligned(gdt));
+ assert(page_aligned(alias));
+
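+	/*
+	 * Point the alias's page-table entry at the physical page
+	 * backing the real GDT; the mapping is wired, writable,
+	 * and non-executable.
+	 */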
+ pte = pmap_pte(kernel_pmap, alias);
+ pmap_store_pte(pte, kvtophys(gdt) | INTEL_PTE_REF
+ | INTEL_PTE_MOD
+ | INTEL_PTE_WIRED
+ | INTEL_PTE_VALID
+ | INTEL_PTE_WRITE
+ | INTEL_PTE_NX);
+
+	/* TLB flush is unnecessary because the target processor isn't running yet */
+}
+
+
+void
+cpu_desc_init64(cpu_data_t *cdp)
+{
+ cpu_desc_index_t *cdi = &cdp->cpu_desc_index;
+
+ if (cdp == &cpu_data_master) {
+	/*
+	 * Master CPU uses the tables built at boot time.
+	 * Just set the index pointers, using the low-memory aliases
+	 * for the GDT and IDT.
+	 */
+ cdi->cdi_ktss = (void *)&master_ktss64;
+ cdi->cdi_sstk = (vm_offset_t) &master_sstk.top;
+ cdi->cdi_gdt.ptr = (void *)MASTER_GDT_ALIAS;
+ cdi->cdi_idt.ptr = (void *)MASTER_IDT_ALIAS;
+ cdi->cdi_ldt = (struct fake_descriptor *) master_ldt;
+
+ /* Replace the expanded LDTs and TSS slots in the GDT */
+ kernel_ldt_desc64.offset64 = (uintptr_t) &master_ldt;
+ *(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
+ kernel_ldt_desc64;
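+	/* The USER_LDT slot aliases the same kernel LDT */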
+ *(struct fake_descriptor64 *) &master_gdt[sel_idx(USER_LDT)] =
+ kernel_ldt_desc64;
+ kernel_tss_desc64.offset64 = (uintptr_t) &master_ktss64;
+ *(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
+ kernel_tss_desc64;
+
+	/*
+	 * Fix up the expanded descriptors for 64-bit: fix_desc64()
+	 * rewrites each fake descriptor in place from its flat
+	 * base/offset layout into the hardware format.
+	 */
+ fix_desc64((void *) &master_idt64, IDTSZ);
+ fix_desc64((void *) &master_gdt[sel_idx(KERNEL_LDT)], 1);
+ fix_desc64((void *) &master_gdt[sel_idx(USER_LDT)], 1);
+ fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);
+
+	/*
+	 * Set the NMI/fault stacks as IST2/IST1 in the 64-bit TSS.
+	 * Both are carved from low_eintstack: IST1 tops out one
+	 * interrupt stack frame below IST2.
+	 * Note: this will be dynamically re-allocated in VM later.
+	 */
+ master_ktss64.ist2 = (uintptr_t) low_eintstack;
+ master_ktss64.ist1 = (uintptr_t) low_eintstack
+ - sizeof(x86_64_intr_stack_frame_t);
+
+ } else if (cdi->cdi_ktss == NULL) { /* Skipping re-init on wake */
+ cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;
+
+	/*
+	 * Per-cpu GDT, IDT, KTSS descriptors are allocated in the kernel
+	 * heap (cpu_desc_table).
+	 * LDT descriptors are mapped into a separate area.
+	 * The GDT is addressed through an alias mapping, since sgdt is
+	 * executable from user mode and would otherwise leak the kernel
+	 * heap address to user-space.
+	 */
+ cdi->cdi_idt.ptr = (void *)MASTER_IDT_ALIAS;
+ cdi->cdi_gdt.ptr = (void *)CPU_GDT_ALIAS(cdp->cpu_number);
+ cdi->cdi_ktss = (void *)&cdt->ktss;
+ cdi->cdi_sstk = (vm_offset_t)&cdt->sstk.top;
+ cdi->cdi_ldt = cdp->cpu_ldtp;
+
+ /* Make the virtual alias address for the GDT */
+ cpu_gdt_alias((vm_map_offset_t) &cdt->gdt,
+ (vm_map_offset_t) cdi->cdi_gdt.ptr);
+
+ /*
+ * Copy the tables
+ */
+ bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
+ bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
+ bcopy((char *)&master_ktss64, (char *)&cdt->ktss, sizeof(struct x86_64_tss));
+
+ /*
+ * Fix up the entries in the GDT to point to
+ * this LDT and this TSS.
+ */
+ kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
+ *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
+ kernel_ldt_desc64;
+ fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);
+
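+	/* USER_LDT points at the same per-cpu LDT (offset64 is unchanged) */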
+ kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
+ *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
+ kernel_ldt_desc64;
+ fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);
+
+ kernel_tss_desc64.offset64 = (uintptr_t) cdi->cdi_ktss;
+ *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
+ kernel_tss_desc64;
+ fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);
+
+ /* Set (zeroed) fault stack as IST1, NMI intr stack IST2 */
+ bzero((void *) cdt->fstk, sizeof(cdt->fstk));
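+	/*
+	 * Both ISTs share fstk: IST2 (NMI) takes the top, and IST1
+	 * (fault) tops out one interrupt stack frame below it.
+	 */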
+ cdt->ktss.ist2 = (unsigned long)cdt->fstk + sizeof(cdt->fstk);
+ cdt->ktss.ist1 = cdt->ktss.ist2
+ - sizeof(x86_64_intr_stack_frame_t);