X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3903760236c30e3b5ace7a4eefac3a269d68957c..527f99514973766e9c0382a4d8550dfb00f54939:/osfmk/i386/pcb_native.c

diff --git a/osfmk/i386/pcb_native.c b/osfmk/i386/pcb_native.c
index d77d20134..34df7c119 100644
--- a/osfmk/i386/pcb_native.c
+++ b/osfmk/i386/pcb_native.c
@@ -54,7 +54,6 @@
  *	the rights to redistribute these changes.
  */
 
-#include 
 #include 
 #include 
@@ -138,6 +137,7 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
 	set_ds(NULL_SEG);
 	set_es(NULL_SEG);
 	set_fs(NULL_SEG);
+
 	if (get_gs() != NULL_SEG) {
 		swapgs();		/* switch to user's GS context */
 		set_gs(NULL_SEG);
@@ -159,9 +159,7 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
 	/* require 16-byte alignment */
 	assert((pcb_stack_top & 0xF) == 0);
 
-	/* Interrupt stack is pcb */
-	current_ktss64()->rsp0 = pcb_stack_top;
-
+	current_ktss64()->rsp0 = cdp->cpu_desc_index.cdi_sstku;
 	/*
 	 * Top of temporary sysenter stack points to pcb stack.
 	 * Although this is not normally used by 64-bit users,
@@ -169,6 +167,8 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
 	 */
 	*current_sstk64() = pcb_stack_top;
 
+	cdp->cd_estack = cpu_shadowp(cdp->cpu_number)->cd_estack = cdp->cpu_desc_index.cdi_sstku;
+
 	if (is_saved_state64(pcb->iss)) {
 
 		cdp->cpu_task_map = new->map->pmap->pm_task_map;
@@ -177,8 +177,8 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
 		 * Enable the 64-bit user code segment, USER64_CS.
 		 * Disable the 32-bit user code segment, USER_CS.
 		 */
-		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
-		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;
+		gdt_desc_p(USER64_CS)->access |= ACC_PL_U;
+		gdt_desc_p(USER_CS)->access &= ~ACC_PL_U;
 
 		/*
 		 * Switch user's GS base if necessary
@@ -190,12 +190,12 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
 		 * in the event it was altered in user space.
 		 */
 		if ((pcb->cthread_self != 0) || (new->task != kernel_task)) {
-			if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) || (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
+			if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
+			    (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
 				cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
 				wrmsr64(MSR_IA32_KERNEL_GS_BASE, pcb->cthread_self);
 			}
 		}
-
 	} else {
 
 		cdp->cpu_task_map = TASK_MAP_32BIT;
@@ -204,8 +204,14 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
 		 * Disable USER64_CS
 		 * Enable USER_CS
 		 */
-		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
-		ldt_desc_p(USER_CS)->access |= ACC_PL_U;
+
+		/* It's possible that writing to the GDT areas
+		 * is expensive, if the processor intercepts those
+		 * writes to invalidate its internal segment caches
+		 * TODO: perhaps only do this if switching bitness
+		 */
+		gdt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
+		gdt_desc_p(USER_CS)->access |= ACC_PL_U;
 
 		/*
 		 * Set the thread`s cthread (a.k.a pthread)
@@ -359,7 +365,7 @@ machine_thread_create(
 	}
 
 	/*
-	 * Assure that the synthesized 32-bit state including
+	 * Ensure that the synthesized 32-bit state including
 	 * the 64-bit interrupt state can be acommodated in the
 	 * 64-bit state we allocate for both 32-bit and 64-bit threads.
 	 */
@@ -397,9 +403,7 @@ machine_thread_create(
 	 * segment.
 	 */
 	if ((pcb->cthread_desc.access & ACC_P) == 0) {
-		struct real_descriptor *ldtp;
-		ldtp = (struct real_descriptor *)current_ldt();
-		pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
+		pcb->cthread_desc = *gdt_desc_p(USER_DS);
 	}
 
 	return(KERN_SUCCESS);
@@ -422,7 +426,7 @@ machine_thread_destroy(
 #endif
 
 	if (pcb->ifps != 0)
-		fpu_free(pcb->ifps);
+		fpu_free(thread, pcb->ifps);
 	if (pcb->iss != 0) {
 		zfree(iss_zone, pcb->iss);
 		pcb->iss = 0;
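
The central change in act_machine_switch_pcb() above is that the user code-segment descriptors are now toggled in the GDT (gdt_desc_p) rather than the LDT (ldt_desc_p), still by flipping the user privilege-level bits in each descriptor's access byte. Below is a minimal, self-contained sketch of that access-byte pattern; the struct demo_descriptor layout and the DEMO_ACC_PL_U value (0x60, i.e. DPL 3) are illustrative assumptions for this sketch, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's segment descriptor; the real
 * struct real_descriptor also carries base/limit fields and an exact
 * hardware-defined bit layout. */
struct demo_descriptor {
	uint8_t access;		/* present bit, DPL, and type bits */
};

#define DEMO_ACC_PL_U	0x60	/* assumed DPL-3 (user) bits, analogous to ACC_PL_U */

/* Mirror the pattern in the hunks above: when switching to a 64-bit user
 * thread, make the 64-bit code segment reachable from user mode and the
 * 32-bit one unreachable, and vice versa for a 32-bit thread, by flipping
 * the privilege-level bits in each descriptor's access byte. */
static void
demo_select_user_cs(struct demo_descriptor *cs64, struct demo_descriptor *cs32, int is_64bit)
{
	if (is_64bit) {
		cs64->access |= DEMO_ACC_PL_U;
		cs32->access &= ~DEMO_ACC_PL_U;
	} else {
		cs64->access &= ~DEMO_ACC_PL_U;
		cs32->access |= DEMO_ACC_PL_U;
	}
}

int
main(void)
{
	struct demo_descriptor cs64 = { .access = 0x9a };	/* arbitrary example values */
	struct demo_descriptor cs32 = { .access = 0x9a };

	demo_select_user_cs(&cs64, &cs32, 1);
	printf("64-bit thread: cs64 access=0x%02x, cs32 access=0x%02x\n",
	    cs64.access, cs32.access);
	return 0;
}

As the new comment in the diff itself notes, writes to live GDT entries may be expensive if the processor invalidates its internal segment caches on such writes, which is why the TODO suggests performing the toggle only when the thread's bitness actually changes.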