* the rights to redistribute these changes.
*/
-#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>
#include <i386/seg.h>
#include <i386/machine_routines.h>
+#if HYPERVISOR
+#include <kern/hv_support.h>
+#endif
+
#define ASSERT_IS_16BYTE_MULTIPLE_SIZEOF(_type_) \
extern char assert_is_16byte_multiple_sizeof_ ## _type_ \
[(sizeof(_type_) % 16) == 0 ? 1 : -1]
extern zone_t iss_zone; /* zone for saved_state area */
extern zone_t ids_zone; /* zone for debug_state area */
-extern void *get_bsduthreadarg(thread_t);
void
act_machine_switch_pcb(__unused thread_t old, thread_t new)
{
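+	/*
+	 * Clear the data segment selectors so stale values from the
+	 * outgoing thread cannot leak into the incoming context.
+	 */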
set_ds(NULL_SEG);
set_es(NULL_SEG);
set_fs(NULL_SEG);
+
if (get_gs() != NULL_SEG) {
		swapgs();		/* switch to user's GS context */
		set_gs(NULL_SEG);
		swapgs();		/* and back to kernel */

		/* record the active machine state lost */
		cdp->cpu_uber.cu_user_gs_base = 0;
	}
/* require 16-byte alignment */
assert((pcb_stack_top & 0xF) == 0);
- /* Interrupt stack is pcb */
- current_ktss64()->rsp0 = pcb_stack_top;
-
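+	/* Kernel entries from user space now use the per-CPU,
+	 * user-mapped trampoline (sysenter) stack rather than the pcb.
+	 */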
+ current_ktss64()->rsp0 = cdp->cpu_desc_index.cdi_sstku;
	/*
	 * Top of temporary sysenter stack points to pcb stack.
	 * Although this is not normally used by 64-bit users,
	 * it needs to be here in case we take an interrupt before
	 * the first user transition.
	 */
*current_sstk64() = pcb_stack_top;
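+	/* Keep the exception stack pointer in the shadow (user-mapped)
+	 * cpu data in sync with the primary copy.
+	 */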
+ cdp->cd_estack = cpu_shadowp(cdp->cpu_number)->cd_estack = cdp->cpu_desc_index.cdi_sstku;
+
if (is_saved_state64(pcb->iss)) {
cdp->cpu_task_map = new->map->pmap->pm_task_map;
		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
- ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
- ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;
+ gdt_desc_p(USER64_CS)->access |= ACC_PL_U;
+ gdt_desc_p(USER_CS)->access &= ~ACC_PL_U;
/*
* Switch user's GS base if necessary
* in the event it was altered in user space.
*/
if ((pcb->cthread_self != 0) || (new->task != kernel_task)) {
- if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) || (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
+ if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
+ (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
wrmsr64(MSR_IA32_KERNEL_GS_BASE, pcb->cthread_self);
}
}
-
} else {
cdp->cpu_task_map = TASK_MAP_32BIT;
		/*
		 * Disable USER64_CS
		 * Enable USER_CS
		 */
- ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
- ldt_desc_p(USER_CS)->access |= ACC_PL_U;
+
+		/* It's possible that writing to the GDT areas
+		 * is expensive if the processor intercepts those
+		 * writes to invalidate its internal segment caches.
+		 * TODO: perhaps only do this if switching bitness.
+		 */
+ gdt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
+ gdt_desc_p(USER_CS)->access |= ACC_PL_U;
		/*
		 * Set the thread's cthread (a.k.a. pthread)
		 * to the saved state using this descriptor.
		 */
	}
/*
- * Assure that the synthesized 32-bit state including
+ * Ensure that the synthesized 32-bit state including
	 * the 64-bit interrupt state can be accommodated in the
* 64-bit state we allocate for both 32-bit and 64-bit threads.
*/
pcb->cthread_self = 0;
pcb->uldt_selector = 0;
-
+	pcb->thread_gpu_ns = 0;		/* reset per-thread GPU time accounting */
/* Ensure that the "cthread" descriptor describes a valid
* segment.
*/
if ((pcb->cthread_desc.access & ACC_P) == 0) {
- struct real_descriptor *ldtp;
- ldtp = (struct real_descriptor *)current_ldt();
- pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
+ pcb->cthread_desc = *gdt_desc_p(USER_DS);
}
	return(KERN_SUCCESS);
}

void
machine_thread_destroy(
	thread_t thread)
{
- register pcb_t pcb = THREAD_TO_PCB(thread);
+ pcb_t pcb = THREAD_TO_PCB(thread);
+
+#if HYPERVISOR
+ if (thread->hv_thread_target) {
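+		/* Give the hypervisor plugin a chance to tear down
+		 * its per-thread state before the thread is destroyed.
+		 */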
+ hv_callbacks.thread_destroy(thread->hv_thread_target);
+ thread->hv_thread_target = NULL;
+ }
+#endif
if (pcb->ifps != 0)
- fpu_free(pcb->ifps);
+ fpu_free(thread, pcb->ifps);
	if (pcb->iss != 0) {
		zfree(iss_zone, pcb->iss);
		pcb->iss = 0;
	}
	if (pcb->ids) {
		zfree(ids_zone, pcb->ids);
		pcb->ids = NULL;
	}
}
+
+kern_return_t
+machine_thread_set_tsd_base(
+ thread_t thread,
+ mach_vm_offset_t tsd_base)
+{
+
+ if (thread->task == kernel_task) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit(thread)) {
+ /* check for canonical address, set 0 otherwise */
+ if (!IS_USERADDR64_CANONICAL(tsd_base))
+ tsd_base = 0ULL;
+ } else {
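+		/* a 32-bit thread's TSD base must fit in 32 bits */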
+ if (tsd_base > UINT32_MAX)
+ tsd_base = 0ULL;
+ }
+
+ pcb_t pcb = THREAD_TO_PCB(thread);
+ pcb->cthread_self = tsd_base;
+
+ if (!thread_is_64bit(thread)) {
+		/* Set up a flat data descriptor based at tsd_base,
+		 * to be loaded into the USER_CTHREAD LDT slot.
+		 */
+ struct real_descriptor desc = {
+ .limit_low = 1,
+ .limit_high = 0,
+ .base_low = tsd_base & 0xffff,
+ .base_med = (tsd_base >> 16) & 0xff,
+ .base_high = (tsd_base >> 24) & 0xff,
+ .access = ACC_P|ACC_PL_U|ACC_DATA_W,
+			.granularity = SZ_32|SZ_G,	/* 32-bit segment, page granularity */
+ };
+
+ pcb->cthread_desc = desc;
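+		/* have the saved state reload %gs with USER_CTHREAD
+		 * on the next return to user space */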
+ saved_state32(pcb->iss)->gs = USER_CTHREAD;
+ }
+
+ /* For current thread, make the TSD base active immediately */
+ if (thread == current_thread()) {
+
+ if (thread_is_64bit(thread)) {
+ cpu_data_t *cdp;
+
+ mp_disable_preemption();
+ cdp = current_cpu_datap();
+ if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
+ (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE)))
+ wrmsr64(MSR_IA32_KERNEL_GS_BASE, tsd_base);
+ cdp->cpu_uber.cu_user_gs_base = tsd_base;
+ mp_enable_preemption();
+ } else {
+
+ /* assign descriptor */
+ mp_disable_preemption();
+ *ldt_desc_p(USER_CTHREAD) = pcb->cthread_desc;
+ mp_enable_preemption();
+ }
+ }
+
+ return KERN_SUCCESS;
+}