+ /*
+ * Seed cpu_active_thread with the cpu number as a placeholder
+ * value until a real thread is dispatched on this cpu.
+ */
+ cdp->cpu_active_thread = (thread_t) (uintptr_t) cdp->cpu_number;
+
+ cdp->cpu_nanotime = &pal_rtc_nanotime_info;
+
+ kprintf("cpu_data_alloc(%d) %p desc_table: %p "
+ "ldt: %p "
+ "int_stack: 0x%lx-0x%lx\n",
+ cdp->cpu_number, cdp, cdp->cpu_desc_tablep, cdp->cpu_ldtp,
+ (long)(cdp->cpu_int_stack_top - INTSTACK_SIZE), (long)(cdp->cpu_int_stack_top));
+
+ return cdp;
+
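+ /*
+ * Failure path: release whatever was allocated before the error.
+ */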
+abort:
+ if (cdp) {
+ if (cdp->cpu_desc_tablep)
+ kfree((void *) cdp->cpu_desc_tablep,
+ sizeof(cpu_desc_table64_t));
+ if (cdp->cpu_int_stack_top)
+ kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE),
+ INTSTACK_SIZE);
+ kfree((void *) cdp, sizeof(*cdp));
+ }
+ return NULL;
+}
+
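+ /*
+ * Check that a selector is valid for a user data segment register
+ * (DS/ES/FS/GS): the null selector is acceptable, as is any LDT
+ * selector; a GDT selector is acceptable only if it references a
+ * user-privileged descriptor.
+ */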
+boolean_t
+valid_user_data_selector(uint16_t selector)
+{
+ sel_t sel = selector_to_sel(selector);
+
+ if (selector == 0)
+ return (TRUE);
+
+ if (sel.ti == SEL_LDT)
+ return (TRUE);
+ else if (sel.index < GDTSZ) {
+ if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
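+ /*
+ * Check that a selector is valid for the user code segment (CS).
+ * Unlike a data selector, the null selector is rejected and the
+ * requestor privilege level must be USER_PRIV.
+ */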
+boolean_t
+valid_user_code_selector(uint16_t selector)
+{
+ sel_t sel = selector_to_sel(selector);
+
+ if (selector == 0)
+ return (FALSE);
+
+ if (sel.ti == SEL_LDT) {
+ if (sel.rpl == USER_PRIV)
+ return (TRUE);
+ }
+ else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
+ if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
+ return (TRUE);
+ /* Explicitly accept the system code selectors even if they
+ * are not currently user-privileged, since they are
+ * dynamically re-privileged at context switch.
+ */
+ if ((selector == USER_CS) || (selector == USER64_CS))
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
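+ /*
+ * Check that a selector is valid for the user stack segment (SS).
+ * As with CS, the null selector is rejected and USER_PRIV is
+ * required; unlike CS, no exception is made for the system code
+ * selectors.
+ */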
+boolean_t
+valid_user_stack_selector(uint16_t selector)
+{
+ sel_t sel = selector_to_sel(selector);
+
+ if (selector == 0)
+ return (FALSE);
+
+ if (sel.ti == SEL_LDT) {
+ if (sel.rpl == USER_PRIV)
+ return (TRUE);
+ }
+ else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
+ if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
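+ /*
+ * Check a complete set of user segment selectors; all six must
+ * pass the check appropriate to the register they will be
+ * loaded into.
+ */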
+boolean_t
+valid_user_segment_selectors(uint16_t cs,
+ uint16_t ss,
+ uint16_t ds,
+ uint16_t es,
+ uint16_t fs,
+ uint16_t gs)
+{
+ return valid_user_code_selector(cs) &&
+ valid_user_stack_selector(ss) &&
+ valid_user_data_selector(ds) &&
+ valid_user_data_selector(es) &&
+ valid_user_data_selector(fs) &&
+ valid_user_data_selector(gs);
+}
+
+#if NCOPY_WINDOWS > 0
+
+static vm_offset_t user_window_base = 0;
+
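+ /*
+ * Reserve the per-cpu user copy windows in the kernel map.
+ * On first call, a page-table-aligned region large enough for
+ * NCOPY_WINDOWS windows per cpu is allocated; each cpu is then
+ * assigned its own slice of that region.
+ */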
+void
+cpu_userwindow_init(int cpu)
+{
+ cpu_data_t *cdp = cpu_data_ptr[cpu];
+ vm_offset_t user_window;
+ vm_offset_t vaddr;
+ int num_cpus;
+
+ num_cpus = ml_get_max_cpus();
+
+ if (cpu >= num_cpus)
+ panic("cpu_userwindow_init: cpu >= num_cpus");
+
+ if (user_window_base == 0) {
+
+ if (vm_allocate(kernel_map, &vaddr,
+ (NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
+ panic("cpu_userwindow_init: "
+ "couldn't allocate user map window");
+
+ /*
+ * window must start on a page table boundary
+ * in the virtual address space
+ */
+ user_window_base = (vaddr + (NBPDE - 1)) & ~(NBPDE - 1);
+
+ /*
+ * get rid of any allocation leading up to our
+ * starting boundary
+ */
+ vm_deallocate(kernel_map, vaddr, user_window_base - vaddr);
+
+ /*
+ * get rid of the tail that we don't need
+ */
+ user_window = user_window_base +
+ (NBPDE * NCOPY_WINDOWS * num_cpus);
+
+ vm_deallocate(kernel_map, user_window,
+ (vaddr +
+ ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) -
+ user_window);
+ }
+
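+ /*
+ * Each cpu gets its own run of NCOPY_WINDOWS windows within
+ * the shared region.
+ */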
+ user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE);
+
+ cdp->cpu_copywindow_base = user_window;