X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/7e4a7d3939db04e70062ae6c7bf24b8c8b2f5a7c..3e170ce000f1506b7b5d2c5c7faec85ceabb573d:/osfmk/i386/mp_desc.c?ds=sidebyside

diff --git a/osfmk/i386/mp_desc.c b/osfmk/i386/mp_desc.c
index 084038e4b..a3dbca3b1 100644
--- a/osfmk/i386/mp_desc.c
+++ b/osfmk/i386/mp_desc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -62,27 +62,25 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 #include
+#if defined(__i386__) || defined(__x86_64__)
+#include
+#endif /* i386 */
 #if CONFIG_MCA
 #include
 #endif
 
 #include
-#include
-
-
-#ifdef __x86_64__
 
 #define K_INTR_GATE (ACC_P|ACC_PL_K|ACC_INTR_GATE)
 #define U_INTR_GATE (ACC_P|ACC_PL_U|ACC_INTR_GATE)
@@ -90,7 +88,8 @@
 #define TRAP(n, name) extern void *name ;
 #define TRAP_ERR(n, name) extern void *name ;
 #define TRAP_SPC(n, name) extern void *name ;
-#define TRAP_IST(n, name) extern void *name ;
+#define TRAP_IST1(n, name) extern void *name ;
+#define TRAP_IST2(n, name) extern void *name ;
 #define INTERRUPT(n) extern void *_intr_ ## n ;
 #define USER_TRAP(n, name) extern void *name ;
 #define USER_TRAP_SPC(n, name) extern void *name ;
@@ -102,13 +101,14 @@
 #undef TRAP
 #undef TRAP_ERR
 #undef TRAP_SPC
-#undef TRAP_IST
+#undef TRAP_IST1
+#undef TRAP_IST2
 #undef INTERRUPT
 #undef USER_TRAP
 #undef USER_TRAP_SPC
 
 #define TRAP(n, name)			\
-	[n] {				\
+	[n] = {				\
 		(uintptr_t)&name,	\
 		KERNEL64_CS,		\
 		0,			\
@@ -119,8 +119,8 @@
 #define TRAP_ERR TRAP
 #define TRAP_SPC TRAP
 
-#define TRAP_IST(n, name)		\
-	[n] {				\
+#define TRAP_IST1(n, name)		\
+	[n] = {				\
 		(uintptr_t)&name,	\
 		KERNEL64_CS,		\
 		1,			\
@@ -128,8 +128,17 @@
 		0			\
 	},
 
+#define TRAP_IST2(n, name)		\
+	[n] = {				\
+		(uintptr_t)&name,	\
+		KERNEL64_CS,		\
+		2,			\
+		K_INTR_GATE,		\
+		0			\
+	},
+
 #define INTERRUPT(n)			\
-	[n] {				\
+	[n] = {				\
 		(uintptr_t)&_intr_ ## n,\
 		KERNEL64_CS,		\
 		0,			\
@@ -138,7 +147,7 @@
 	},
 
 #define USER_TRAP(n, name)		\
-	[n] {				\
+	[n] = {				\
 		(uintptr_t)&name,	\
 		KERNEL64_CS,		\
 		0,			\
@@ -147,24 +156,18 @@
 	},
 
 #define USER_TRAP_SPC USER_TRAP
-
 
 // Declare the table using the macros we just set up
-struct fake_descriptor64 master_idt64[IDTSZ] __attribute__ ((aligned (4096))) = {
+struct fake_descriptor64 master_idt64[IDTSZ]
+	__attribute__ ((section("__HIB,__desc")))
+	__attribute__ ((aligned(PAGE_SIZE))) = {
 #include "../x86_64/idt_table.h"
 };
-#endif
-
-/*
- * The i386 needs an interrupt stack to keep the PCB stack from being
- * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
- * than any thread`s kernel stack.
- */
 
 /*
  * First cpu`s interrupt stack.
  */
-extern uint32_t	low_intstack[];	/* bottom */
+extern uint32_t	low_intstack[];		/* bottom */
 extern uint32_t	low_eintstack[];	/* top */
 
 /*
@@ -172,44 +175,20 @@ extern uint32_t low_eintstack[]; /* top */
  * The master cpu (cpu 0) has its data area statically allocated;
  * others are allocated dynamically and this array is updated at runtime.
  */
-cpu_data_t	cpu_data_master = {
+static cpu_data_t	cpu_data_master = {
 	.cpu_this = &cpu_data_master,
-	.cpu_nanotime = &rtc_nanotime_info,
+	.cpu_nanotime = &pal_rtc_nanotime_info,
 	.cpu_int_stack_top = (vm_offset_t) low_eintstack,
-#ifdef __i386__
-	.cpu_is64bit = FALSE,
-#else
-	.cpu_is64bit = TRUE
-#endif
 };
-cpu_data_t	*cpu_data_ptr[MAX_CPUS] = { [0] &cpu_data_master };
+cpu_data_t	*cpu_data_ptr[MAX_CPUS] = { [0] = &cpu_data_master };
 
 decl_simple_lock_data(,ncpus_lock);	/* protects real_ncpus */
 unsigned int	real_ncpus = 1;
 unsigned int	max_ncpus = MAX_CPUS;
 
-#ifdef __i386__
-extern void *hi_remap_text;
-#define HI_TEXT(lo_text)	\
-	(((uint32_t)&lo_text - (uint32_t)&hi_remap_text) + HIGH_MEM_BASE)
-
-extern void hi_sysenter(void);
-
-typedef struct {
-	uint16_t	length;
-	uint32_t	offset[2];
-} __attribute__((__packed__)) table_descriptor64_t;
-
-extern	table_descriptor64_t	gdtptr64;
-extern	table_descriptor64_t	idtptr64;
-#endif
 extern void hi64_sysenter(void);
 extern void hi64_syscall(void);
 
-#if defined(__x86_64__) && !defined(UBER64)
-#define UBER64(x) ((uintptr_t)x)
-#endif
-
 /*
  * Multiprocessor i386/i486 systems use a separate copy of the
  * GDT, IDT, LDT, and kernel TSS per processor. The first three
@@ -244,12 +223,14 @@ struct fake_descriptor cpudata_desc_pattern = {
 	ACC_P|ACC_PL_K|ACC_DATA_W
 };
 
+#if NCOPY_WINDOWS > 0
 struct fake_descriptor userwindow_desc_pattern = {
 	(unsigned int) 0,
 	((NBPDE * NCOPY_WINDOWS) / PAGE_SIZE) - 1,
 	SZ_32 | SZ_G,
 	ACC_P|ACC_PL_U|ACC_DATA_W
 };
+#endif
 
 struct fake_descriptor physwindow_desc_pattern = {
 	(unsigned int) 0,
@@ -383,21 +364,21 @@ fix_desc64(void *descp, int count)
 		case ACC_CALL_GATE:
 		case ACC_INTR_GATE:
 		case ACC_TRAP_GATE:
-			real.gate.offset_low16 = fakep->offset64 & 0xFFFF;
+			real.gate.offset_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
 			real.gate.selector16 = fakep->lim_or_seg & 0xFFFF;
 			real.gate.IST = fakep->size_or_IST & 0x7;
 			real.gate.access8 = fakep->access;
-			real.gate.offset_high16 = (fakep->offset64>>16)&0xFFFF;
+			real.gate.offset_high16 = (uint16_t)((fakep->offset64>>16) & 0xFFFF);
 			real.gate.offset_top32 = (uint32_t)(fakep->offset64>>32);
 			break;
 		default:	/* Otherwise */
 			real.desc.limit_low16 = fakep->lim_or_seg & 0xFFFF;
-			real.desc.base_low16 = fakep->offset64 & 0xFFFF;
-			real.desc.base_med8 = (fakep->offset64 >> 16) & 0xFF;
+			real.desc.base_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
+			real.desc.base_med8 = (uint8_t)((fakep->offset64 >> 16) & 0xFF);
 			real.desc.access8 = fakep->access;
 			real.desc.limit_high4 = (fakep->lim_or_seg >> 16) & 0xFF;
 			real.desc.granularity4 = fakep->size_or_IST;
-			real.desc.base_high8 = (fakep->offset64 >> 24) & 0xFF;
+			real.desc.base_high8 = (uint8_t)((fakep->offset64 >> 24) & 0xFF);
 			real.desc.base_top32 = (uint32_t)(fakep->offset64>>32);
 		}
 
@@ -408,154 +389,26 @@ fix_desc64(void *descp, int count)
 	}
 }
 
-#ifdef __i386__
-void
-cpu_desc_init(cpu_data_t *cdp)
+static void
+cpu_gdt_alias(vm_map_offset_t gdt, vm_map_offset_t alias)
 {
-	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;
-
-	if (cdp == &cpu_data_master) {
-		/*
-		 * Fix up the entries in the GDT to point to
-		 * this LDT and this TSS.
-		 */
-		struct fake_descriptor temp_fake_desc;
-		temp_fake_desc = ldt_desc_pattern;
-		temp_fake_desc.offset = (vm_offset_t) &master_ldt;
-		fix_desc(&temp_fake_desc, 1);
-		*(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_LDT)] =
-			temp_fake_desc;
-		*(struct fake_descriptor *) &master_gdt[sel_idx(USER_LDT)] =
-			temp_fake_desc;
-
-		temp_fake_desc = tss_desc_pattern;
-		temp_fake_desc.offset = (vm_offset_t) &master_ktss;
-		fix_desc(&temp_fake_desc, 1);
-		*(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_TSS)] =
-			temp_fake_desc;
-
-#if MACH_KDB
-		temp_fake_desc = tss_desc_pattern;
-		temp_fake_desc.offset = (vm_offset_t) &master_dbtss;
-		fix_desc(&temp_fake_desc, 1);
-		*(struct fake_descriptor *) &master_gdt[sel_idx(DEBUG_TSS)] =
-			temp_fake_desc;
-#endif
-
-		temp_fake_desc = cpudata_desc_pattern;
-		temp_fake_desc.offset = (vm_offset_t) &cpu_data_master;
-		fix_desc(&temp_fake_desc, 1);
-		*(struct fake_descriptor *) &master_gdt[sel_idx(CPU_DATA_GS)] =
-			temp_fake_desc;
-
-		fix_desc((void *)&master_idt, IDTSZ);
-
-		cdi->cdi_idt.ptr = master_idt;
-		cdi->cdi_gdt.ptr = (void *)master_gdt;
-
-
-		/*
-		 * Master CPU uses the tables built at boot time.
-		 * Just set the index pointers to the high shared-mapping space.
-		 * Note that the sysenter stack uses empty space above the ktss
-		 * in the HIGH_FIXED_KTSS page. In this case we don't map the
-		 * the real master_sstk in low memory.
-		 */
-		cdi->cdi_ktss = (struct i386_tss *)
-			pmap_index_to_virt(HIGH_FIXED_KTSS) ;
-		cdi->cdi_sstk = (vm_offset_t) (cdi->cdi_ktss + 1) +
-				(vm_offset_t) &master_sstk.top -
-				(vm_offset_t) &master_sstk;
-	} else {
-		cpu_desc_table_t	*cdt = (cpu_desc_table_t *) cdp->cpu_desc_tablep;
-
-		vm_offset_t	cpu_hi_desc;
-
-		cpu_hi_desc = pmap_cpu_high_shared_remap(
-					cdp->cpu_number,
-					HIGH_CPU_DESC,
-					(vm_offset_t) cdt, 1);
-
-		/*
-		 * Per-cpu GDT, IDT, LDT, KTSS descriptors are allocated in one
-		 * block (cpu_desc_table) and double-mapped into high shared space
-		 * in one page window.
-		 * Also, a transient stack for the fast sysenter path. The top of
-		 * which is set at context switch time to point to the PCB using
-		 * the high address.
-		 */
-		cdi->cdi_gdt.ptr  = (struct fake_descriptor *) (cpu_hi_desc +
-					offsetof(cpu_desc_table_t, gdt[0]));
-		cdi->cdi_idt.ptr  = (struct fake_descriptor *) (cpu_hi_desc +
-					offsetof(cpu_desc_table_t, idt[0]));
-		cdi->cdi_ktss = (struct i386_tss *) (cpu_hi_desc +
-					offsetof(cpu_desc_table_t, ktss));
-		cdi->cdi_sstk = cpu_hi_desc + offsetof(cpu_desc_table_t, sstk.top);
-
-		/*
-		 * LDT descriptors are mapped into a seperate area.
-		 */
-		cdi->cdi_ldt = (struct fake_descriptor *)
-				pmap_cpu_high_shared_remap(
-					cdp->cpu_number,
-					HIGH_CPU_LDT_BEGIN,
-					(vm_offset_t) cdp->cpu_ldtp,
-					HIGH_CPU_LDT_END - HIGH_CPU_LDT_BEGIN + 1);
-
-		/*
-		 * Copy the tables
-		 */
-		bcopy((char *)master_idt, (char *)cdt->idt, sizeof(master_idt));
-		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
-		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
-		bzero((char *)&cdt->ktss, sizeof(struct i386_tss));
-#if MACH_KDB
-		cdi->cdi_dbtss = (struct i386_tss *) (cpu_hi_desc +
-					offsetof(cpu_desc_table_t, dbtss));
-		bcopy((char *)&master_dbtss,
-				(char *)&cdt->dbtss,
-				sizeof(struct i386_tss));
-#endif	/* MACH_KDB */
-
-		/*
-		 * Fix up the entries in the GDT to point to
-		 * this LDT and this TSS.
-		 */
-		struct fake_descriptor temp_ldt = ldt_desc_pattern;
-		temp_ldt.offset = (vm_offset_t)cdi->cdi_ldt;
-		fix_desc(&temp_ldt, 1);
-
-		cdt->gdt[sel_idx(KERNEL_LDT)] = temp_ldt;
-		cdt->gdt[sel_idx(USER_LDT)] = temp_ldt;
-
-		cdt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
-		cdt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) cdi->cdi_ktss;
-		fix_desc(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);
+	pt_entry_t *pte = NULL;
 
-		cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
-		cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
-		fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);
+	/* Require page alignment */
+	assert(page_aligned(gdt));
+	assert(page_aligned(alias));
 
-#if MACH_KDB
-		cdt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
-		cdt->gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) cdi->cdi_dbtss;
-		fix_desc(&cdt->gdt[sel_idx(DEBUG_TSS)], 1);
+	pte = pmap_pte(kernel_pmap, alias);
+	pmap_store_pte(pte, kvtophys(gdt) | INTEL_PTE_REF
+					  | INTEL_PTE_MOD
+					  | INTEL_PTE_WIRED
+					  | INTEL_PTE_VALID
+					  | INTEL_PTE_WRITE
+					  | INTEL_PTE_NX);
 
-		cdt->dbtss.esp0 = (int)(db_task_stack_store +
-				(INTSTACK_SIZE * (cdp->cpu_number)) - sizeof (natural_t));
-		cdt->dbtss.esp = cdt->dbtss.esp0;
-		cdt->dbtss.eip = (int)&db_task_start;
-#endif	/* MACH_KDB */
-
-		cdt->ktss.ss0 = KERNEL_DS;
-		cdt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */
-
-		cpu_userwindow_init(cdp->cpu_number);
-		cpu_physwindow_init(cdp->cpu_number);
-
-	}
+	/* TLB flush unnecessary because target processor isn't running yet */
 }
-#endif /* __i386__ */
+
 
 void
 cpu_desc_init64(cpu_data_t *cdp)
@@ -569,18 +422,17 @@ cpu_desc_init64(cpu_data_t *cdp)
 	 */
 	cdi->cdi_ktss = (void *)&master_ktss64;
 	cdi->cdi_sstk = (vm_offset_t) &master_sstk.top;
-	cdi->cdi_gdt.ptr = (void *)master_gdt;
-	cdi->cdi_idt.ptr = (void *)master_idt64;
+	cdi->cdi_gdt.ptr = (void *)MASTER_GDT_ALIAS;
+	cdi->cdi_idt.ptr = (void *)MASTER_IDT_ALIAS;
 	cdi->cdi_ldt = (struct fake_descriptor *) master_ldt;
 
-	/* Replace the expanded LDTs and TSS slots in the GDT */
-	kernel_ldt_desc64.offset64 = UBER64(&master_ldt);
+	kernel_ldt_desc64.offset64 = (uintptr_t) &master_ldt;
 	*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
 		kernel_ldt_desc64;
 	*(struct fake_descriptor64 *) &master_gdt[sel_idx(USER_LDT)] =
 		kernel_ldt_desc64;
-	kernel_tss_desc64.offset64 = UBER64(&master_ktss64);
+	kernel_tss_desc64.offset64 = (uintptr_t) &master_ktss64;
 	*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
 		kernel_tss_desc64;
 
@@ -591,27 +443,35 @@ cpu_desc_init64(cpu_data_t *cdp)
 	fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);
 
 	/*
-	 * Set the double-fault stack as IST1 in the 64-bit TSS
+	 * Set the NMI/fault stacks as IST2/IST1 in the 64-bit TSS
+	 * Note: this will be dynamically re-allocated in VM later.
 	 */
-	master_ktss64.ist1 = UBER64((uintptr_t) df_task_stack_end);
+	master_ktss64.ist2 = (uintptr_t) low_eintstack;
+	master_ktss64.ist1 = (uintptr_t) low_eintstack
+				- sizeof(x86_64_intr_stack_frame_t);
 
-	} else {
+	} else if (cdi->cdi_ktss == NULL) {	/* Skipping re-init on wake */
 		cpu_desc_table64_t	*cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;
+
 		/*
 		 * Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel
 		 * heap (cpu_desc_table).
 		 * LDT descriptors are mapped into a separate area.
+		 * GDT descriptors are addressed by alias to avoid sgdt leaks to user-space.
		 */
-		cdi->cdi_gdt.ptr  = (struct fake_descriptor *)cdt->gdt;
-		cdi->cdi_idt.ptr  = (void *)cdt->idt;
+		cdi->cdi_idt.ptr  = (void *)MASTER_IDT_ALIAS;
+		cdi->cdi_gdt.ptr  = (void *)CPU_GDT_ALIAS(cdp->cpu_number);
 		cdi->cdi_ktss = (void *)&cdt->ktss;
 		cdi->cdi_sstk = (vm_offset_t)&cdt->sstk.top;
 		cdi->cdi_ldt  = cdp->cpu_ldtp;
 
+		/* Make the virtual alias address for the GDT */
+		cpu_gdt_alias((vm_map_offset_t) &cdt->gdt,
+			      (vm_map_offset_t) cdi->cdi_gdt.ptr);
+
 		/*
 		 * Copy the tables
 		 */
-		bcopy((char *)master_idt64, (char *)cdt->idt, sizeof(master_idt64));
 		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
 		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
 		bcopy((char *)&master_ktss64, (char *)&cdt->ktss, sizeof(struct x86_64_tss));
@@ -620,32 +480,26 @@ cpu_desc_init64(cpu_data_t *cdp)
 		 * Fix up the entries in the GDT to point to
 		 * this LDT and this TSS.
 		 */
-		kernel_ldt_desc64.offset64 = UBER64(cdi->cdi_ldt);
+		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
 		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
 			kernel_ldt_desc64;
 		fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);
 
-		kernel_ldt_desc64.offset64 = UBER64(cdi->cdi_ldt);
+		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
 		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
 			kernel_ldt_desc64;
 		fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);
 
-		kernel_tss_desc64.offset64 = UBER64(cdi->cdi_ktss);
+		kernel_tss_desc64.offset64 = (uintptr_t) cdi->cdi_ktss;
 		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
 			kernel_tss_desc64;
 		fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);
 
-		/* Set double-fault stack as IST1 */
-		cdt->ktss.ist1 = UBER64((unsigned long)cdt->dfstk + sizeof(cdt->dfstk));
-#ifdef __i386__
-		cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
-		cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
-		fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);
-
-		/* Allocate copyio windows */
-		cpu_userwindow_init(cdp->cpu_number);
-		cpu_physwindow_init(cdp->cpu_number);
-#endif
+		/* Set (zeroed) fault stack as IST1, NMI intr stack IST2 */
+		bzero((void *) cdt->fstk, sizeof(cdt->fstk));
+		cdt->ktss.ist2 = (unsigned long)cdt->fstk + sizeof(cdt->fstk);
+		cdt->ktss.ist1 = cdt->ktss.ist2 -
+				 sizeof(x86_64_intr_stack_frame_t);
 	}
 
 	/* Require that the top of the sysenter stack is 16-byte aligned */
@@ -653,84 +507,36 @@ cpu_desc_init64(cpu_data_t *cdp)
 		panic("cpu_desc_init64() sysenter stack not 16-byte aligned");
 }
 
-#ifdef __i386__
-void
-cpu_desc_load(cpu_data_t *cdp)
-{
-	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;
-
-	cdi->cdi_idt.size = 0x1000 + cdp->cpu_number;
-	cdi->cdi_gdt.size = sizeof(struct real_descriptor)*GDTSZ - 1;
-
-	lgdt((unsigned long *) &cdi->cdi_gdt);
-	lidt((unsigned long *) &cdi->cdi_idt);
-	lldt(KERNEL_LDT);
-
-	set_tr(KERNEL_TSS);
-
-	__asm__ volatile("mov %0, %%gs" : : "rm" ((unsigned short)(CPU_DATA_GS)));
-}
-#endif	/* __i386__ */
 
 void
 cpu_desc_load64(cpu_data_t *cdp)
 {
 	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;
 
-#ifdef __i386__
+	/* Stuff the kernel per-cpu data area address into the MSRs */
+	wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
+	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
+
 	/*
-	 * Load up the new descriptors etc
-	 * ml_load_desc64() expects these global pseudo-descriptors:
-	 *	gdtptr64 -> per-cpu gdt
-	 *	idtptr64 -> per-cpu idt
-	 * These are 10-byte descriptors with 64-bit addresses into
-	 * uber-space.
-	 *
-	 * Refer to commpage/cpu_number.s for the IDT limit trick.
+	 * Ensure the TSS segment's busy bit is clear.
+	 * This is required for the case of reloading descriptors at wake
+	 * to avoid their complete re-initialization.
 	 */
-	gdtptr64.length = GDTSZ * sizeof(struct real_descriptor) - 1;
-	gdtptr64.offset[0] = (uint32_t) cdi->cdi_gdt.ptr;
-	gdtptr64.offset[1] = KERNEL_UBER_BASE_HI32;
-	idtptr64.length = 0x1000 + cdp->cpu_number;
-	idtptr64.offset[0] = (uint32_t) cdi->cdi_idt.ptr;
-	idtptr64.offset[1] = KERNEL_UBER_BASE_HI32;
-
-	/* Make sure busy bit is cleared in the TSS */
 	gdt_desc_p(KERNEL_TSS)->access &= ~ACC_TSS_BUSY;
 
-	ml_load_desc64();
-#else
 	/* Load the GDT, LDT, IDT and TSS */
 	cdi->cdi_gdt.size = sizeof(struct real_descriptor)*GDTSZ - 1;
 	cdi->cdi_idt.size = 0x1000 + cdp->cpu_number;
-	lgdt((unsigned long *) &cdi->cdi_gdt);
-	lidt((unsigned long *) &cdi->cdi_idt);
+	lgdt((uintptr_t *) &cdi->cdi_gdt);
+	lidt((uintptr_t *) &cdi->cdi_idt);
 	lldt(KERNEL_LDT);
 	set_tr(KERNEL_TSS);
 
-	/* Stuff the pre-cpu data area into the MSR and swapgs to activate */
-	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (unsigned long)cdp);
 #if GPROF // Hack to enable mcount to work on K64
 	__asm__ volatile("mov %0, %%gs" : : "rm" ((unsigned short)(KERNEL_DS)));
-#endif
-	swapgs();
-
-	cpu_mode_init(cdp);
 #endif
 }
 
-#ifdef __i386__
-/*
- * Set MSRs for sysenter/sysexit for 32-bit.
- */
-static void
-fast_syscall_init(__unused cpu_data_t *cdp)
-{
-	wrmsr(MSR_IA32_SYSENTER_CS, SYSENTER_CS, 0);
-	wrmsr(MSR_IA32_SYSENTER_EIP, HI_TEXT(hi_sysenter), 0);
-	wrmsr(MSR_IA32_SYSENTER_ESP, current_sstk(), 0);
-}
-#endif
 
 /*
  * Set MSRs for sysenter/sysexit and syscall/sysret for 64-bit.
@@ -739,8 +545,8 @@ static void
 fast_syscall_init64(__unused cpu_data_t *cdp)
 {
 	wrmsr64(MSR_IA32_SYSENTER_CS, SYSENTER_CS);
-	wrmsr64(MSR_IA32_SYSENTER_EIP, UBER64((uintptr_t) hi64_sysenter));
-	wrmsr64(MSR_IA32_SYSENTER_ESP, UBER64(current_sstk()));
+	wrmsr64(MSR_IA32_SYSENTER_EIP, (uintptr_t) hi64_sysenter);
+	wrmsr64(MSR_IA32_SYSENTER_ESP, current_sstk());
 
 	/* Enable syscall/sysret */
 	wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_SCE);
@@ -749,7 +555,7 @@ fast_syscall_init64(__unused cpu_data_t *cdp)
 	 * Note USER_CS because sysret uses this + 16 when returning to
 	 * 64-bit code.
 	 */
-	wrmsr64(MSR_IA32_LSTAR, UBER64((uintptr_t) hi64_syscall));
+	wrmsr64(MSR_IA32_LSTAR, (uintptr_t) hi64_syscall);
 	wrmsr64(MSR_IA32_STAR, (((uint64_t)USER_CS) << 48) |
 				(((uint64_t)KERNEL64_CS) << 32));
 	/*
@@ -761,20 +567,9 @@ fast_syscall_init64(__unused cpu_data_t *cdp)
 	 */
 	wrmsr64(MSR_IA32_FMASK, EFL_DF|EFL_IF|EFL_TF|EFL_NT);
 
-#ifdef __i386__
-	/*
-	 * Set the Kernel GS base MSR to point to per-cpu data in uber-space.
-	 * The uber-space handler (hi64_syscall) uses the swapgs instruction.
- */ - wrmsr64(MSR_IA32_KERNEL_GS_BASE, UBER64(cdp)); - -#if ONLY_SAFE_FOR_LINDA_SERIAL - kprintf("fast_syscall_init64() KERNEL_GS_BASE=0x%016llx\n", - rdmsr64(MSR_IA32_KERNEL_GS_BASE)); -#endif -#endif } + cpu_data_t * cpu_data_alloc(boolean_t is_boot_cpu) { @@ -783,15 +578,13 @@ cpu_data_alloc(boolean_t is_boot_cpu) if (is_boot_cpu) { assert(real_ncpus == 1); - cdp = &cpu_data_master; + cdp = cpu_datap(0); if (cdp->cpu_processor == NULL) { simple_lock_init(&ncpus_lock, 0); cdp->cpu_processor = cpu_processor_alloc(TRUE); #if NCOPY_WINDOWS > 0 cdp->cpu_pmap = pmap_cpu_alloc(TRUE); #endif - queue_init(&cdp->rtclock_timer.queue); - cdp->rtclock_timer.deadline = EndOfAllTime; } return cdp; } @@ -799,7 +592,7 @@ cpu_data_alloc(boolean_t is_boot_cpu) /* * Allocate per-cpu data: */ - ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t)); + ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU); if (ret != KERN_SUCCESS) { printf("cpu_data_alloc() failed, ret=%d\n", ret); goto abort; @@ -807,15 +600,12 @@ cpu_data_alloc(boolean_t is_boot_cpu) bzero((void*) cdp, sizeof(cpu_data_t)); cdp->cpu_this = cdp; - /* Propagate mode */ - cdp->cpu_is64bit = cpu_mode_is64bit(); - /* * Allocate interrupt stack: */ ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp->cpu_int_stack_top, - INTSTACK_SIZE); + INTSTACK_SIZE, VM_KERN_MEMORY_CPU); if (ret != KERN_SUCCESS) { printf("cpu_data_alloc() int stack failed, ret=%d\n", ret); goto abort; @@ -823,15 +613,13 @@ cpu_data_alloc(boolean_t is_boot_cpu) bzero((void*) cdp->cpu_int_stack_top, INTSTACK_SIZE); cdp->cpu_int_stack_top += INTSTACK_SIZE; - /* * Allocate descriptor table: - * Size depends on cpu mode. */ ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp->cpu_desc_tablep, - cdp->cpu_is64bit ? sizeof(cpu_desc_table64_t) - : sizeof(cpu_desc_table_t)); + sizeof(cpu_desc_table64_t), + VM_KERN_MEMORY_CPU); if (ret != KERN_SUCCESS) { printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret); goto abort; @@ -842,7 +630,8 @@ cpu_data_alloc(boolean_t is_boot_cpu) */ ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp->cpu_ldtp, - sizeof(struct real_descriptor) * LDTSZ); + sizeof(struct real_descriptor) * LDTSZ, + VM_KERN_MEMORY_CPU); if (ret != KERN_SUCCESS) { printf("cpu_data_alloc() ldt failed, ret=%d\n", ret); goto abort; @@ -860,9 +649,17 @@ cpu_data_alloc(boolean_t is_boot_cpu) real_ncpus++; simple_unlock(&ncpus_lock); - cdp->cpu_nanotime = &rtc_nanotime_info; - queue_init(&cdp->rtclock_timer.queue); - cdp->rtclock_timer.deadline = EndOfAllTime; + /* + * Before this cpu has been assigned a real thread context, + * we give it a fake, unique, non-zero thread id which the locking + * primitives use as their lock value. + * Note that this does not apply to the boot processor, cpu 0, which + * transitions to a thread context well before other processors are + * started. 
+ */ + cdp->cpu_active_thread = (thread_t) (uintptr_t) cdp->cpu_number; + + cdp->cpu_nanotime = &pal_rtc_nanotime_info; kprintf("cpu_data_alloc(%d) %p desc_table: %p " "ldt: %p " @@ -876,7 +673,7 @@ abort: if (cdp) { if (cdp->cpu_desc_tablep) kfree((void *) cdp->cpu_desc_tablep, - sizeof(*cdp->cpu_desc_tablep)); + sizeof(cpu_desc_table64_t)); if (cdp->cpu_int_stack_top) kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE), INTSTACK_SIZE); @@ -885,6 +682,64 @@ abort: return NULL; } +boolean_t +valid_user_data_selector(uint16_t selector) +{ + sel_t sel = selector_to_sel(selector); + + if (selector == 0) + return (TRUE); + + if (sel.ti == SEL_LDT) + return (TRUE); + else if (sel.index < GDTSZ) { + if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) + return (TRUE); + } + + return (FALSE); +} + +boolean_t +valid_user_code_selector(uint16_t selector) +{ + sel_t sel = selector_to_sel(selector); + + if (selector == 0) + return (FALSE); + + if (sel.ti == SEL_LDT) { + if (sel.rpl == USER_PRIV) + return (TRUE); + } + else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { + if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) + return (TRUE); + } + + return (FALSE); +} + +boolean_t +valid_user_stack_selector(uint16_t selector) +{ + sel_t sel = selector_to_sel(selector); + + if (selector == 0) + return (FALSE); + + if (sel.ti == SEL_LDT) { + if (sel.rpl == USER_PRIV) + return (TRUE); + } + else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { + if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) + return (TRUE); + } + + return (FALSE); +} + boolean_t valid_user_segment_selectors(uint16_t cs, uint16_t ss, @@ -901,7 +756,6 @@ valid_user_segment_selectors(uint16_t cs, valid_user_data_selector(gs); } - #if NCOPY_WINDOWS > 0 static vm_offset_t user_window_base = 0; @@ -923,7 +777,7 @@ cpu_userwindow_init(int cpu) if (vm_allocate(kernel_map, &vaddr, (NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE, - VM_FLAGS_ANYWHERE) != KERN_SUCCESS) + VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) panic("cpu_userwindow_init: " "couldn't allocate user map window"); @@ -954,15 +808,12 @@ cpu_userwindow_init(int cpu) user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE); cdp->cpu_copywindow_base = user_window; + /* + * Abuse this pdp entry, the pdp now actually points to + * an array of copy windows addresses. 
+ */ cdp->cpu_copywindow_pdp = pmap_pde(kernel_pmap, user_window); -#ifdef __i386__ - cpu_desc_index_t *cdi = &cdp->cpu_desc_index; - cdi->cdi_gdt.ptr[sel_idx(USER_WINDOW_SEL)] = userwindow_desc_pattern; - cdi->cdi_gdt.ptr[sel_idx(USER_WINDOW_SEL)].offset = user_window; - - fix_desc(&cdi->cdi_gdt.ptr[sel_idx(USER_WINDOW_SEL)], 1); -#endif /* __i386__ */ } void @@ -973,7 +824,7 @@ cpu_physwindow_init(int cpu) if (phys_window == 0) { if (vm_allocate(kernel_map, &phys_window, - PAGE_SIZE, VM_FLAGS_ANYWHERE) + PAGE_SIZE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) panic("cpu_physwindow_init: " "couldn't allocate phys map window"); @@ -983,18 +834,11 @@ cpu_physwindow_init(int cpu) * pte pointer we're interested in actually * exists in the page table */ - pmap_expand(kernel_pmap, phys_window); + pmap_expand(kernel_pmap, phys_window, PMAP_EXPAND_OPTIONS_NONE); cdp->cpu_physwindow_base = phys_window; cdp->cpu_physwindow_ptep = vtopte(phys_window); } -#ifdef __i386__ - cpu_desc_index_t *cdi = &cdp->cpu_desc_index; - cdi->cdi_gdt.ptr[sel_idx(PHYS_WINDOW_SEL)] = physwindow_desc_pattern; - cdi->cdi_gdt.ptr[sel_idx(PHYS_WINDOW_SEL)].offset = phys_window; - - fix_desc(&cdi->cdi_gdt.ptr[sel_idx(PHYS_WINDOW_SEL)], 1); -#endif /* __i386__ */ } #endif /* NCOPY_WINDOWS > 0 */ @@ -1004,19 +848,63 @@ cpu_physwindow_init(int cpu) void cpu_mode_init(cpu_data_t *cdp) { -#ifdef __i386__ - if (cpu_mode_is64bit()) { - cpu_IA32e_enable(cdp); - cpu_desc_load64(cdp); - fast_syscall_init64(cdp); - } else { - fast_syscall_init(cdp); - } -#else fast_syscall_init64(cdp); -#endif - - /* Call for per-cpu pmap mode initialization */ - pmap_cpu_init(); } +/* + * Allocate a new interrupt stack for the boot processor from the + * heap rather than continue to use the statically allocated space. + * Also switch to a dynamically allocated cpu data area. + */ +void +cpu_data_realloc(void) +{ + int ret; + vm_offset_t istk; + vm_offset_t fstk; + cpu_data_t *cdp; + boolean_t istate; + + ret = kmem_alloc(kernel_map, &istk, INTSTACK_SIZE, VM_KERN_MEMORY_CPU); + if (ret != KERN_SUCCESS) { + panic("cpu_data_realloc() stack alloc, ret=%d\n", ret); + } + bzero((void*) istk, INTSTACK_SIZE); + istk += INTSTACK_SIZE; + + ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU); + if (ret != KERN_SUCCESS) { + panic("cpu_data_realloc() cpu data alloc, ret=%d\n", ret); + } + + /* Copy old contents into new area and make fix-ups */ + assert(cpu_number() == 0); + bcopy((void *) cpu_data_ptr[0], (void*) cdp, sizeof(cpu_data_t)); + cdp->cpu_this = cdp; + cdp->cpu_int_stack_top = istk; + timer_call_queue_init(&cdp->rtclock_timer.queue); + + /* Allocate the separate fault stack */ + ret = kmem_alloc(kernel_map, &fstk, PAGE_SIZE, VM_KERN_MEMORY_CPU); + if (ret != KERN_SUCCESS) { + panic("cpu_data_realloc() fault stack alloc, ret=%d\n", ret); + } + bzero((void*) fstk, PAGE_SIZE); + fstk += PAGE_SIZE; + + /* + * With interrupts disabled commmit the new areas. + */ + istate = ml_set_interrupts_enabled(FALSE); + cpu_data_ptr[0] = cdp; + master_ktss64.ist2 = (uintptr_t) fstk; + master_ktss64.ist1 = (uintptr_t) fstk + - sizeof(x86_64_intr_stack_frame_t); + wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp); + wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp); + (void) ml_set_interrupts_enabled(istate); + + kprintf("Reallocated master cpu data: %p," + " interrupt stack: %p, fault stack: %p\n", + (void *) cdp, (void *) istk, (void *) fstk); +}