+	/* Per-cpu index of descriptor-table pointers we are about to fill in. */
+	cpu_desc_index_t *cdi = &cdp->cpu_desc_index;
+
+	if (cdp == &cpu_data_master) {
+		/*
+		 * Master CPU: patch the statically-built master tables in place.
+		 * Fix up the entries in the GDT to point to
+		 * this LDT and this TSS.
+		 * NOTE(review): fix_desc() appears to convert the compile-time
+		 * fake_descriptor layout into the real hardware descriptor
+		 * format before installation -- confirm against fix_desc().
+		 */
+		struct fake_descriptor temp_fake_desc;
+		temp_fake_desc = ldt_desc_pattern;
+		temp_fake_desc.offset = (vm_offset_t) &master_ldt;
+		fix_desc(&temp_fake_desc, 1);
+		/* Both the kernel and user LDT selectors refer to master_ldt. */
+		*(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_LDT)] =
+			temp_fake_desc;
+		*(struct fake_descriptor *) &master_gdt[sel_idx(USER_LDT)] =
+			temp_fake_desc;
+
+		/* Kernel TSS descriptor: base it at master_ktss. */
+		temp_fake_desc = tss_desc_pattern;
+		temp_fake_desc.offset = (vm_offset_t) &master_ktss;
+		fix_desc(&temp_fake_desc, 1);
+		*(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_TSS)] =
+			temp_fake_desc;
+
+		/* %gs-based per-cpu data selector: base it at cpu_data_master. */
+		temp_fake_desc = cpudata_desc_pattern;
+		temp_fake_desc.offset = (vm_offset_t) &cpu_data_master;
+		fix_desc(&temp_fake_desc, 1);
+		*(struct fake_descriptor *) &master_gdt[sel_idx(CPU_DATA_GS)] =
+			temp_fake_desc;
+
+		/* Convert the entire boot IDT to hardware format as well. */
+		fix_desc((void *)&master_idt, IDTSZ);
+
+		cdi->cdi_idt.ptr = master_idt;
+		cdi->cdi_gdt.ptr = (void *)master_gdt;
+
+
+		/*
+		 * Master CPU uses the tables built at boot time.
+		 * Just set the index pointers to the high shared-mapping space.
+		 * Note that the sysenter stack uses empty space above the ktss
+		 * in the HIGH_FIXED_KTSS page. In this case we don't map the
+		 * real master_sstk in low memory.
+		 */
+		cdi->cdi_ktss = (struct i386_tss *)
+			pmap_index_to_virt(HIGH_FIXED_KTSS) ;
+		/*
+		 * sstk points just past the high-mapped ktss, offset by where
+		 * master_sstk.top sits relative to the start of master_sstk.
+		 */
+		cdi->cdi_sstk = (vm_offset_t) (cdi->cdi_ktss + 1) +
+				(vm_offset_t) &master_sstk.top -
+				(vm_offset_t) &master_sstk;
+	} else {
+		/*
+		 * Secondary (slave) CPU: its descriptor tables live in the
+		 * dynamically allocated cpu_desc_table pointed to by
+		 * cdp->cpu_desc_tablep.
+		 */
+		cpu_desc_table_t *cdt = (cpu_desc_table_t *) cdp->cpu_desc_tablep;
+
+		vm_offset_t cpu_hi_desc;
+
+		/* Double-map the table into the high shared address space. */
+		cpu_hi_desc = pmap_cpu_high_shared_remap(
+					cdp->cpu_number,
+					HIGH_CPU_DESC,
+					(vm_offset_t) cdt, 1);
+
+		/*
+		 * Per-cpu GDT, IDT, LDT, KTSS descriptors are allocated in one
+		 * block (cpu_desc_table) and double-mapped into high shared space
+		 * in one page window.
+		 * Also, a transient stack for the fast sysenter path. The top of
+		 * which is set at context switch time to point to the PCB using
+		 * the high address.
+		 */
+		cdi->cdi_gdt.ptr  = (struct fake_descriptor *) (cpu_hi_desc +
+					offsetof(cpu_desc_table_t, gdt[0]));
+		cdi->cdi_idt.ptr  = (struct fake_descriptor *) (cpu_hi_desc +
+					offsetof(cpu_desc_table_t, idt[0]));
+		cdi->cdi_ktss = (struct i386_tss *) (cpu_hi_desc +
+					offsetof(cpu_desc_table_t, ktss));
+		cdi->cdi_sstk = cpu_hi_desc + offsetof(cpu_desc_table_t, sstk.top);
+
+		/*
+		 * LDT descriptors are mapped into a separate area.
+		 */
+		cdi->cdi_ldt = (struct fake_descriptor *)
+				pmap_cpu_high_shared_remap(
+					cdp->cpu_number,
+					HIGH_CPU_LDT_BEGIN,
+					(vm_offset_t) cdp->cpu_ldtp,
+					HIGH_CPU_LDT_END - HIGH_CPU_LDT_BEGIN + 1);
+
+		/*
+		 * Copy the master tables as this cpu's starting point;
+		 * the TSS starts zeroed and is filled in below.
+		 */
+		bcopy((char *)master_idt, (char *)cdt->idt, sizeof(master_idt));
+		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
+		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
+		bzero((char *)&cdt->ktss, sizeof(struct i386_tss));
+
+		/*
+		 * Fix up the entries in the GDT to point to
+		 * this LDT and this TSS.
+		 * Note the offsets use the high-shared (cdi) addresses,
+		 * not the low-memory cdt addresses.
+		 */
+		struct fake_descriptor temp_ldt = ldt_desc_pattern;
+		temp_ldt.offset = (vm_offset_t)cdi->cdi_ldt;
+		fix_desc(&temp_ldt, 1);
+
+		cdt->gdt[sel_idx(KERNEL_LDT)] = temp_ldt;
+		cdt->gdt[sel_idx(USER_LDT)] = temp_ldt;
+
+		cdt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
+		cdt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) cdi->cdi_ktss;
+		fix_desc(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);
+
+		/* %gs per-cpu data descriptor points at this cpu's cpu_data. */
+		cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
+		cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
+		fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);
+
+		/* Ring-0 stack segment for privilege transitions into the kernel. */
+		cdt->ktss.ss0 = KERNEL_DS;
+		/* Offset beyond the TSS limit => no I/O permission bitmap. */
+		cdt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */
+
+		cpu_userwindow_init(cdp->cpu_number);
+		cpu_physwindow_init(cdp->cpu_number);
+