+ cdp->cpu_task_map = new->map->pmap->pm_task_map;
+
+ /*
+ * Enable the 64-bit user code segment, USER64_CS.
+ * Disable the 32-bit user code segment, USER_CS.
+ */
+ ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
+ ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;
+
+ } else {
+ x86_saved_state_compat32_t *iss32compat;
+ vm_offset_t isf;
+
+ assert(is_saved_state32(pcb->iss));
+ iss32compat = (x86_saved_state_compat32_t *) pcb->iss;
+
+ pcb_stack_top = (int) (iss32compat + 1);
+ /* require 16-byte alignment */
+ assert((pcb_stack_top & 0xF) == 0);
+
+ /*
+ * Set pointer to PCB's interrupt stack frame in cpu data.
+ * Used by debug trap handler.
+ */
+ isf = (vm_offset_t) &iss32compat->isf64;
+ cdp->cpu_uber.cu_isf = UBER64(isf);
+
+ /* Top of temporary sysenter stack points to pcb stack */
+ *current_sstk64() = UBER64(pcb_stack_top);
+
+ /* Interrupt stack is pcb */
+ current_ktss64()->rsp0 = UBER64(pcb_stack_top);
+
+ cdp->cpu_task_map = TASK_MAP_32BIT;
+ /* Precalculate pointers to syscall argument store, for use
+ * in the trampolines.
+ */
+ cdp->cpu_uber_arg_store = UBER64((vm_offset_t)get_bsduthreadarg(new));
+ cdp->cpu_uber_arg_store_valid = UBER64((vm_offset_t)&pcb->arg_store_valid);
+ pcb->arg_store_valid = 0;
+
+ /*
+ * Disable USER64_CS
+ * Enable USER_CS
+ */
+ ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
+ ldt_desc_p(USER_CS)->access |= ACC_PL_U;
+ }
+
+ /*
+ * Set the thread`s cthread (a.k.a. pthread).
+ * For 32-bit user this involves setting the USER_CTHREAD
+ * descriptor in the LDT to point to the cthread data.
+ * This involves copying in the pre-initialized descriptor.
+ */
+ ldtp = (struct real_descriptor *)current_ldt();
+ ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
+ if (pcb->uldt_selector != 0)
+ ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
+
+
+ /*
+ * For 64-bit, we additionally set the 64-bit User GS base
+ * address. On return to 64-bit user, the GS.Base MSR will be written.
+ */
+ cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
+
+ /*
+ * Set the thread`s LDT or LDT entry.
+ */
+ if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
+ /*
+ * Use system LDT.
+ */
+ ml_cpu_set_ldt(KERNEL_LDT);
+ } else {
+ /*
+ * Task has its own LDT.
+ */
+ user_ldt_set(new);
+ }
+
+ /*
+ * Bump the scheduler generation count in the commpage.
+ * This can be read by user code to detect its preemption.
+ */
+ commpage_sched_gen_inc();
+}
+#endif
+
+/*
+ * Switch to the first thread on a CPU.
+ */
+void
+machine_load_context(
+ thread_t new)
+{
+#if CONFIG_COUNTERS
+ machine_pmc_cswitch(NULL, new);
+#endif
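+ /*
+ * Mark the thread as running on this processor, switch in its
+ * PCB-related state and load its register context.
+ */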
+ new->machine.specFlags |= OnProc;
+ act_machine_switch_pcb(new);
+ Load_context(new);
+}
+
+/*
+ * Switch to a new thread.
+ * Save the old thread`s kernel state or continuation,
+ * and return it.
+ */
+thread_t
+machine_switch_context(
+ thread_t old,
+ thread_continue_t continuation,
+ thread_t new)
+{
+#if MACH_RT
+ assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
+#endif
+#if CONFIG_COUNTERS
+ machine_pmc_cswitch(old, new);
+#endif
+ /*
+ * Save FP registers if in use.
+ */
+ fpu_save_context(old);
+
+
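+ /*
+ * The old thread is leaving this processor; the new one is
+ * taking it over.
+ */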
+ old->machine.specFlags &= ~OnProc;
+ new->machine.specFlags |= OnProc;
+
+ /*
+ * Monitor the stack depth and report new max,
+ * not worrying about races.
+ */
+ vm_offset_t depth = current_stack_depth();
+ if (depth > kernel_stack_depth_max) {
+ kernel_stack_depth_max = depth;
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
+ (long) depth, 0, 0, 0, 0);
+ }
+
+ /*
+ * Switch address maps if need be, even if not switching tasks.
+ * (A server activation may be "borrowing" a client map.)
+ */
+ PMAP_SWITCH_CONTEXT(old, new, cpu_number())
+
+ /*
+ * Load the rest of the user state for the new thread
+ */
+ act_machine_switch_pcb(new);
+
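+ /*
+ * Switch kernel register state and stacks; this returns the
+ * thread we switched away from.
+ */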
+ return(Switch_context(old, continuation, new));
+}
+
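+/*
+ * Quiesce this processor for shutdown: suspend VMX if configured,
+ * save the current thread's FPU context, switch to the idle
+ * thread's address map and enter the shutdown context, which runs
+ * the supplied doshutdown routine.
+ */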
+thread_t
+machine_processor_shutdown(
+ thread_t thread,
+ void (*doshutdown)(processor_t),
+ processor_t processor)
+{
+#if CONFIG_VMX
+ vmx_suspend();
+#endif
+ fpu_save_context(thread);
+ PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
+ return(Shutdown_context(thread, doshutdown, processor));
+}
+
+/*
+ * act_machine_sv_free
+ * Release save areas associated with an act. If flag is true, release
+ * user-level save area(s) too; otherwise don't.
+ */
+void
+act_machine_sv_free(__unused thread_t act, __unused int flag)
+{
+}
+
+
+/*
+ * This is where registers that are not normally specified by the mach-o
+ * file on an execve would be nullified, perhaps to avoid a covert channel.
+ */
+kern_return_t
+machine_thread_state_initialize(
+ thread_t thread)
+{
+ /*
+ * If there's an fpu save area, free it.
+ * The initialized state will then be lazily faulted-in, if required.
+ * And if we're the target, re-arm the no-fpu trap.
+ */
+ if (thread->machine.pcb->ifps) {
+ (void) fpu_set_fxstate(thread, NULL);
+
+ if (thread == current_thread())
+ clear_fpu();
+ }
+
+ if (thread->machine.pcb->ids) {
+ zfree(ids_zone, thread->machine.pcb->ids);
+ thread->machine.pcb->ids = NULL;
+ }
+
+ return KERN_SUCCESS;
+}
+
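+/*
+ * Return EFL_USER_SET, the EFLAGS bits that are always set in
+ * user flag state (cf. the scrubbing in the state setters below).
+ */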
+uint32_t
+get_eflags_exportmask(void)
+{
+ return EFL_USER_SET;
+}
+
+/*
+ * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
+ * for 32bit tasks only
+ * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
+ * for 64bit tasks only
+ * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
+ * for 32bit tasks only
+ * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
+ * for 64bit tasks only
+ * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
+ * for either 32bit or 64bit tasks
+ * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
+ * for 32bit tasks only
+ * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
+ * for 64bit tasks only
+ * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
+ * for either 32bit or 64bit tasks
+ * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
+ * for 32bit tasks only
+ * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
+ * for 64bit tasks only
+ * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
+ * for either 32bit or 64bit tasks
+ */
+
+
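+/*
+ * Copy the trap number, error code and faulting address out of the
+ * 64-bit saved state.
+ */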
+static void
+get_exception_state64(thread_t thread, x86_exception_state64_t *es)
+{
+ x86_saved_state64_t *saved_state;
+
+ saved_state = USER_REGS64(thread);
+
+ es->trapno = saved_state->isf.trapno;
+ es->err = (typeof(es->err))saved_state->isf.err;
+ es->faultvaddr = saved_state->cr2;
+}
+
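+/*
+ * Copy the trap number, error code and faulting address out of the
+ * 32-bit saved state.
+ */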
+static void
+get_exception_state32(thread_t thread, x86_exception_state32_t *es)
+{
+ x86_saved_state32_t *saved_state;
+
+ saved_state = USER_REGS32(thread);
+
+ es->trapno = saved_state->trapno;
+ es->err = saved_state->err;
+ es->faultvaddr = saved_state->cr2;
+}
+
+
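+/*
+ * Install a caller-supplied 32-bit register state into the saved
+ * state, scrubbing the segment selectors and EFLAGS bits.
+ */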
+static int
+set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
+{
+ x86_saved_state32_t *saved_state;
+
+
+ saved_state = USER_REGS32(thread);
+
+ /*
+ * Scrub segment selector values:
+ */
+ ts->cs = USER_CS;
+#ifdef __i386__
+ if (ts->ss == 0) ts->ss = USER_DS;
+ if (ts->ds == 0) ts->ds = USER_DS;
+ if (ts->es == 0) ts->es = USER_DS;
+#else /* __x86_64__ */
+ /*
+ * On a 64 bit kernel, we always override the data segments,
+ * as the actual selector numbers have changed. This also
+ * means that we don't support setting the data segments
+ * manually any more.
+ */
+ ts->ss = USER_DS;
+ ts->ds = USER_DS;
+ ts->es = USER_DS;
+#endif
+
+ /* Check segment selectors are safe */
+ if (!valid_user_segment_selectors(ts->cs,
+ ts->ss,
+ ts->ds,
+ ts->es,
+ ts->fs,
+ ts->gs))
+ return(KERN_INVALID_ARGUMENT);
+
+ saved_state->eax = ts->eax;
+ saved_state->ebx = ts->ebx;
+ saved_state->ecx = ts->ecx;
+ saved_state->edx = ts->edx;
+ saved_state->edi = ts->edi;
+ saved_state->esi = ts->esi;
+ saved_state->ebp = ts->ebp;
+ saved_state->uesp = ts->esp;
+ saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
+ saved_state->eip = ts->eip;
+ saved_state->cs = ts->cs;
+ saved_state->ss = ts->ss;
+ saved_state->ds = ts->ds;
+ saved_state->es = ts->es;
+ saved_state->fs = ts->fs;
+ saved_state->gs = ts->gs;
+
+ /*
+ * If the trace trap bit is being set,
+ * ensure that the user returns via iret
+ * - which is signaled thusly:
+ */
+ if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
+ saved_state->cs = SYSENTER_TF_CS;
+
+ return(KERN_SUCCESS);
+}
+
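+/*
+ * Install a caller-supplied 64-bit register state into the saved
+ * state. RIP and RSP must be canonical user addresses and the code
+ * selector is forced to USER64_CS.
+ */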
+static int
+set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
+{
+ x86_saved_state64_t *saved_state;
+
+
+ saved_state = USER_REGS64(thread);
+
+ if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
+ !IS_USERADDR64_CANONICAL(ts->rip))
+ return(KERN_INVALID_ARGUMENT);
+
+ saved_state->r8 = ts->r8;
+ saved_state->r9 = ts->r9;
+ saved_state->r10 = ts->r10;
+ saved_state->r11 = ts->r11;
+ saved_state->r12 = ts->r12;
+ saved_state->r13 = ts->r13;
+ saved_state->r14 = ts->r14;
+ saved_state->r15 = ts->r15;
+ saved_state->rax = ts->rax;
+ saved_state->rbx = ts->rbx;
+ saved_state->rcx = ts->rcx;
+ saved_state->rdx = ts->rdx;
+ saved_state->rdi = ts->rdi;
+ saved_state->rsi = ts->rsi;
+ saved_state->rbp = ts->rbp;
+ saved_state->isf.rsp = ts->rsp;
+ saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
+ saved_state->isf.rip = ts->rip;
+ saved_state->isf.cs = USER64_CS;
+ saved_state->fs = (uint32_t)ts->fs;
+ saved_state->gs = (uint32_t)ts->gs;
+
+ return(KERN_SUCCESS);
+}
+
+
+
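+/*
+ * Export the saved 32-bit general register state.
+ */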
+static void
+get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
+{
+ x86_saved_state32_t *saved_state;
+
+
+ saved_state = USER_REGS32(thread);
+
+ ts->eax = saved_state->eax;
+ ts->ebx = saved_state->ebx;
+ ts->ecx = saved_state->ecx;
+ ts->edx = saved_state->edx;
+ ts->edi = saved_state->edi;
+ ts->esi = saved_state->esi;
+ ts->ebp = saved_state->ebp;
+ ts->esp = saved_state->uesp;
+ ts->eflags = saved_state->efl;
+ ts->eip = saved_state->eip;
+ ts->cs = saved_state->cs;
+ ts->ss = saved_state->ss;
+ ts->ds = saved_state->ds;
+ ts->es = saved_state->es;
+ ts->fs = saved_state->fs;
+ ts->gs = saved_state->gs;
+}
+
+
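+/*
+ * Export the saved 64-bit general register state.
+ */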
+static void
+get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
+{
+ x86_saved_state64_t *saved_state;
+
+
+ saved_state = USER_REGS64(thread);
+
+ ts->r8 = saved_state->r8;
+ ts->r9 = saved_state->r9;
+ ts->r10 = saved_state->r10;
+ ts->r11 = saved_state->r11;
+ ts->r12 = saved_state->r12;
+ ts->r13 = saved_state->r13;
+ ts->r14 = saved_state->r14;
+ ts->r15 = saved_state->r15;
+ ts->rax = saved_state->rax;
+ ts->rbx = saved_state->rbx;
+ ts->rcx = saved_state->rcx;
+ ts->rdx = saved_state->rdx;
+ ts->rdi = saved_state->rdi;
+ ts->rsi = saved_state->rsi;
+ ts->rbp = saved_state->rbp;
+ ts->rsp = saved_state->isf.rsp;
+ ts->rflags = saved_state->isf.rflags;
+ ts->rip = saved_state->isf.rip;
+ ts->cs = saved_state->isf.cs;
+ ts->fs = saved_state->fs;
+ ts->gs = saved_state->gs;
+}
+
+
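+/*
+ * Load the supplied state into the saved 32-bit user registers,
+ * taking the thread lock if the target is not the caller. The
+ * frame pointer is zeroed and the segment selectors and flags are
+ * forced to known-good user values.
+ */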
+void
+thread_set_wq_state32(thread_t thread, thread_state_t tstate)
+{
+ x86_thread_state32_t *state;
+ x86_saved_state32_t *saved_state;
+ thread_t curth = current_thread();
+ spl_t s=0;
+
+
+ saved_state = USER_REGS32(thread);
+
+ state = (x86_thread_state32_t *)tstate;
+
+ if (curth != thread) {
+ s = splsched();
+ thread_lock(thread);
+ }
+
+ saved_state->ebp = 0;
+ saved_state->eip = state->eip;
+ saved_state->eax = state->eax;
+ saved_state->ebx = state->ebx;
+ saved_state->ecx = state->ecx;
+ saved_state->edx = state->edx;
+ saved_state->edi = state->edi;
+ saved_state->esi = state->esi;
+ saved_state->uesp = state->esp;
+ saved_state->efl = EFL_USER_SET;
+
+ saved_state->cs = USER_CS;
+ saved_state->ss = USER_DS;
+ saved_state->ds = USER_DS;
+ saved_state->es = USER_DS;
+
+
+ if (curth != thread) {
+ thread_unlock(thread);
+ splx(s);
+ }
+}
+
+
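+/*
+ * 64-bit counterpart of thread_set_wq_state32: load the supplied
+ * state into the saved user registers, zero the frame pointer and
+ * force USER64_CS and the standard user flags.
+ */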
+void
+thread_set_wq_state64(thread_t thread, thread_state_t tstate)
+{
+ x86_thread_state64_t *state;
+ x86_saved_state64_t *saved_state;
+ thread_t curth = current_thread();
+ spl_t s=0;
+
+
+ saved_state = USER_REGS64(thread);
+ state = (x86_thread_state64_t *)tstate;
+
+ if (curth != thread) {
+ s = splsched();
+ thread_lock(thread);
+ }
+
+ saved_state->rbp = 0;
+ saved_state->rdi = state->rdi;
+ saved_state->rsi = state->rsi;
+ saved_state->rdx = state->rdx;
+ saved_state->rcx = state->rcx;
+ saved_state->r8 = state->r8;
+ saved_state->r9 = state->r9;
+
+ saved_state->isf.rip = state->rip;
+ saved_state->isf.rsp = state->rsp;
+ saved_state->isf.cs = USER64_CS;
+ saved_state->isf.rflags = EFL_USER_SET;
+
+
+ if (curth != thread) {
+ thread_unlock(thread);
+ splx(s);
+ }
+}
+
+
+
+/*
+ * machine_thread_set_state:
+ *
+ * Set the status of the specified thread.
+ */
+
+kern_return_t
+machine_thread_set_state(
+ thread_t thr_act,
+ thread_flavor_t flavor,
+ thread_state_t tstate,
+ mach_msg_type_number_t count)
+{
+ switch (flavor) {
+ case x86_SAVED_STATE32:
+ {
+ x86_saved_state32_t *state;
+ x86_saved_state32_t *saved_state;
+
+ if (count < x86_SAVED_STATE32_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (x86_saved_state32_t *) tstate;
+
+ /* Check segment selectors are safe */
+ if (!valid_user_segment_selectors(state->cs,
+ state->ss,
+ state->ds,
+ state->es,
+ state->fs,
+ state->gs))
+ return KERN_INVALID_ARGUMENT;
+
+
+ saved_state = USER_REGS32(thr_act);
+
+ /*
+ * General registers
+ */
+ saved_state->edi = state->edi;
+ saved_state->esi = state->esi;
+ saved_state->ebp = state->ebp;
+ saved_state->uesp = state->uesp;
+ saved_state->ebx = state->ebx;
+ saved_state->edx = state->edx;
+ saved_state->ecx = state->ecx;
+ saved_state->eax = state->eax;
+ saved_state->eip = state->eip;
+
+ saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
+
+ /*
+ * If the trace trap bit is being set,
+ * ensure that the user returns via iret
+ * - which is signaled thusly:
+ */
+ if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
+ state->cs = SYSENTER_TF_CS;
+
+ /*
+ * User setting segment registers.
+ * Code and stack selectors have already been
+ * checked. Others will be reset by 'iret'
+ * if they are not valid.
+ */
+ saved_state->cs = state->cs;
+ saved_state->ss = state->ss;
+ saved_state->ds = state->ds;
+ saved_state->es = state->es;
+ saved_state->fs = state->fs;
+ saved_state->gs = state->gs;
+
+ break;
+ }
+
+ case x86_SAVED_STATE64:
+ {
+ x86_saved_state64_t *state;
+ x86_saved_state64_t *saved_state;
+
+ if (count < x86_SAVED_STATE64_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (!thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (x86_saved_state64_t *) tstate;
+
+ /* Verify that the supplied code segment selector is
+ * valid. In 64-bit mode, the FS and GS segment overrides
+ * use the FS.base and GS.base MSRs to calculate
+ * base addresses, and the trampolines don't directly
+ * restore the segment registers--hence they are no
+ * longer relevant for validation.
+ */
+ if (!valid_user_code_selector(state->isf.cs))
+ return KERN_INVALID_ARGUMENT;
+
+ /* Check pc and stack are canonical addresses */
+ if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
+ !IS_USERADDR64_CANONICAL(state->isf.rip))
+ return KERN_INVALID_ARGUMENT;
+
+
+ saved_state = USER_REGS64(thr_act);
+
+ /*
+ * General registers
+ */
+ saved_state->r8 = state->r8;
+ saved_state->r9 = state->r9;
+ saved_state->r10 = state->r10;
+ saved_state->r11 = state->r11;
+ saved_state->r12 = state->r12;
+ saved_state->r13 = state->r13;
+ saved_state->r14 = state->r14;
+ saved_state->r15 = state->r15;
+ saved_state->rdi = state->rdi;
+ saved_state->rsi = state->rsi;
+ saved_state->rbp = state->rbp;
+ saved_state->rbx = state->rbx;
+ saved_state->rdx = state->rdx;
+ saved_state->rcx = state->rcx;
+ saved_state->rax = state->rax;
+ saved_state->isf.rsp = state->isf.rsp;
+ saved_state->isf.rip = state->isf.rip;
+
+ saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
+
+ /*
+ * User setting segment registers.
+ * Code and stack selectors have already been
+ * checked. Others will be reset on the return
+ * to user path if they are not valid.
+ */
+ saved_state->isf.cs = state->isf.cs;
+ saved_state->isf.ss = state->isf.ss;
+ saved_state->fs = state->fs;
+ saved_state->gs = state->gs;
+
+ break;
+ }
+
+ case x86_FLOAT_STATE32:
+ {
+ if (count != x86_FLOAT_STATE32_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ return fpu_set_fxstate(thr_act, tstate);
+ }
+
+ case x86_FLOAT_STATE64:
+ {
+ if (count != x86_FLOAT_STATE64_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ if ( !thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ return fpu_set_fxstate(thr_act, tstate);
+ }
+
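+ /*
+ * Combined flavor: dispatch on the embedded state header, which
+ * must match the bitness of the target thread.
+ */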
+ case x86_FLOAT_STATE:
+ {
+ x86_float_state_t *state;
+
+ if (count != x86_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (x86_float_state_t *)tstate;
+ if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
+ thread_is_64bit(thr_act)) {
+ return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
+ }
+ if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
+ !thread_is_64bit(thr_act)) {
+ return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
+ }
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ case x86_THREAD_STATE32:
+ {
+ if (count != x86_THREAD_STATE32_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
+ }
+
+ case x86_THREAD_STATE64:
+ {
+ if (count != x86_THREAD_STATE64_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (!thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
+
+ }
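+ /*
+ * Combined flavor: pick the 32- or 64-bit setter based on the
+ * embedded state header.
+ */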
+ case x86_THREAD_STATE:
+ {
+ x86_thread_state_t *state;
+
+ if (count != x86_THREAD_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (x86_thread_state_t *)tstate;
+
+ if (state->tsh.flavor == x86_THREAD_STATE64 &&
+ state->tsh.count == x86_THREAD_STATE64_COUNT &&
+ thread_is_64bit(thr_act)) {
+ return set_thread_state64(thr_act, &state->uts.ts64);
+ } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
+ state->tsh.count == x86_THREAD_STATE32_COUNT &&
+ !thread_is_64bit(thr_act)) {
+ return set_thread_state32(thr_act, &state->uts.ts32);
+ } else
+ return(KERN_INVALID_ARGUMENT);
+
+ break;
+ }
+ case x86_DEBUG_STATE32:
+ {
+ x86_debug_state32_t *state;
+ kern_return_t ret;
+
+ if (thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (x86_debug_state32_t *)tstate;
+
+ ret = set_debug_state32(thr_act, state);
+
+ return ret;
+ }
+ case x86_DEBUG_STATE64:
+ {
+ x86_debug_state64_t *state;
+ kern_return_t ret;
+
+ if (!thread_is_64bit(thr_act))
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (x86_debug_state64_t *)tstate;
+
+ ret = set_debug_state64(thr_act, state);
+
+ return ret;
+ }
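+ /*
+ * Combined flavor: dispatch to the matching debug state setter.
+ */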
+ case x86_DEBUG_STATE:
+ {
+ x86_debug_state_t *state;
+ kern_return_t ret = KERN_INVALID_ARGUMENT;
+
+ if (count != x86_DEBUG_STATE_COUNT)
+ return (KERN_INVALID_ARGUMENT);
+
+ state = (x86_debug_state_t *)tstate;
+ if (state->dsh.flavor == x86_DEBUG_STATE64 &&
+ state->dsh.count == x86_DEBUG_STATE64_COUNT &&
+ thread_is_64bit(thr_act)) {
+ ret = set_debug_state64(thr_act, &state->uds.ds64);
+ }
+ else
+ if (state->dsh.flavor == x86_DEBUG_STATE32 &&
+ state->dsh.count == x86_DEBUG_STATE32_COUNT &&
+ !thread_is_64bit(thr_act)) {
+ ret = set_debug_state32(thr_act, &state->uds.ds32);
+ }
+ return ret;
+ }
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }