+ cpu_desc_index_t *cdi = &cdp->cpu_desc_index;
+
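+ /*
+ * The boot processor initializes the statically allocated master
+ * tables in place; secondary processors take the 'else' path below
+ * and receive per-CPU copies of those master tables.
+ */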
+ if (cdp == cpu_data_master) {
+ /*
+ * Populate the double-mapped 'u' and base 'b' fields of the
+ * descriptor index with the KTSS, I/G/LDT and sysenter stack
+ * addresses.
+ */
+ cdi->cdi_ktssu = (void *)DBLMAP(&master_ktss64);
+ cdi->cdi_ktssb = (void *)&master_ktss64;
+ cdi->cdi_sstku = (vm_offset_t)DBLMAP(&master_sstk.top);
+ cdi->cdi_sstkb = (vm_offset_t) &master_sstk.top;
+
+ cdi->cdi_gdtu.ptr = (void *)DBLMAP((uintptr_t) &master_gdt);
+ cdi->cdi_gdtb.ptr = (void *)&master_gdt;
+ cdi->cdi_idtu.ptr = (void *)DBLMAP((uintptr_t) &master_idt64);
+ cdi->cdi_idtb.ptr = (void *)&master_idt64;
+ cdi->cdi_ldtu = (struct real_descriptor *)DBLMAP((uintptr_t)&master_ldt[0]);
+ cdi->cdi_ldtb = &master_ldt[0];
+
+ /*
+ * Replace the LDT and TSS slots in the GDT with expanded 64-bit
+ * descriptors; each expanded descriptor occupies two 8-byte GDT
+ * entries.
+ */
+ kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu;
+ *(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
+ kernel_ldt_desc64;
+ *(struct fake_descriptor64 *) &master_gdt[sel_idx(USER_LDT)] =
+ kernel_ldt_desc64;
+ kernel_tss_desc64.offset64 = (uintptr_t) DBLMAP(&master_ktss64);
+ *(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
+ kernel_tss_desc64;
+
+ /*
+ * Convert the 'fake' (build-time) descriptor images in the IDT and
+ * the expanded GDT slots into real 64-bit hardware format.
+ */
+ fix_desc64((void *) &master_idt64, IDTSZ);
+ fix_desc64((void *) &master_gdt[sel_idx(KERNEL_LDT)], 1);
+ fix_desc64((void *) &master_gdt[sel_idx(USER_LDT)], 1);
+ fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);
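+ /*
+ * fix_desc64() rewrites each descriptor in place, so a given table
+ * must be fixed up exactly once.
+ */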
+
+ /*
+ * Set the NMI/fault stacks as IST2/IST1 in the 64-bit TSS
+ */
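+ /*
+ * An IST slot gives the processor a known-good stack to switch to
+ * when the corresponding vector is delivered, regardless of the
+ * state of the interrupted context.
+ */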
+ master_ktss64.ist2 = (uintptr_t) low_eintstack;
+ master_ktss64.ist1 = (uintptr_t) low_eintstack - sizeof(x86_64_intr_stack_frame_t);
+ } else if (cdi->cdi_ktssu == NULL) { /* Skipping re-init on wake */
+ cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;
+
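+ /*
+ * Secondary processors share the double-mapped master IDT rather
+ * than receiving a private copy.
+ */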
+ cdi->cdi_idtu.ptr = (void *)DBLMAP((uintptr_t) &master_idt64);
+
+ cdi->cdi_ktssu = (void *)DBLMAP(&cdt->ktss);
+ cdi->cdi_ktssb = (void *)(&cdt->ktss);
+ cdi->cdi_sstku = (vm_offset_t)DBLMAP(&cdt->sstk.top);
+ cdi->cdi_sstkb = (vm_offset_t)(&cdt->sstk.top);
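+ /*
+ * LDTALIAS() is assumed to be the LDT counterpart of DBLMAP(),
+ * returning the double-mapped alias of the per-CPU LDT.
+ */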
+ cdi->cdi_ldtu = (void *)LDTALIAS(cdp->cpu_ldtp);
+ cdi->cdi_ldtb = (void *)(cdp->cpu_ldtp);
+
+ /*
+ * Copy the master GDT, LDT and KTSS into this CPU's private
+ * descriptor tables.
+ */
+ bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
+ bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, mldtsz);
+ bcopy((char *)&master_ktss64, (char *)&cdt->ktss, sizeof(struct x86_64_tss));
+ cdi->cdi_gdtu.ptr = (void *)DBLMAP(cdt->gdt);
+ cdi->cdi_gdtb.ptr = (void *)(cdt->gdt);
+ /*
+ * Fix up the entries in this CPU's GDT to point to its own LDT and
+ * TSS. Note the reuse of the global 'kernel_ldt_desc64', which is
+ * not concurrency-safe; higher-level synchronization is expected.
+ */
+ kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu;
+ *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
+ kernel_ldt_desc64;
+ fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);
+
+ kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu;
+ *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
+ kernel_ldt_desc64;
+ fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);
+
+ kernel_tss_desc64.offset64 = (uintptr_t) cdi->cdi_ktssu;
+ *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
+ kernel_tss_desc64;
+ fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);
+
+ /* Set (zeroed) fault stack as IST1, NMI intr stack IST2 */
+ uint8_t *cfstk = &scfstks[cdp->cpu_number].fstk[0];
+ cdt->fstkp = cfstk;
+ bzero((void *) cfstk, FSTK_SZ);
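+ /*
+ * IST2 (NMI) is the double-mapped top of the fault stack region;
+ * IST1 (fault) sits one interrupt frame lower, mirroring the
+ * master-CPU layout above.
+ */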
+ cdt->ktss.ist2 = DBLMAP((uint64_t)cdt->fstkp + FSTK_SZ);
+ cdt->ktss.ist1 = cdt->ktss.ist2 - sizeof(x86_64_intr_stack_frame_t);