+kern_return_t
+machine_thread_state_convert_from_user(
+ __unused thread_t thread,
+ __unused thread_flavor_t flavor,
+ __unused thread_state_t tstate,
+ __unused mach_msg_type_number_t count)
+{
+ // No conversion from userspace representation on this platform
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+machine_thread_siguctx_pointer_convert_to_user(
+ __unused thread_t thread,
+ __unused user_addr_t *uctxp)
+{
+ // No conversion to userspace representation on this platform
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+machine_thread_function_pointers_convert_from_user(
+ __unused thread_t thread,
+ __unused user_addr_t *fptrs,
+ __unused uint32_t count)
+{
+ // No conversion from userspace representation on this platform
+ return KERN_SUCCESS;
+}
+
+/*
+ * machine_thread_set_state (historically act_machine_set_state):
+ *
+ * Set the status of the specified thread.
+ */
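+
+/*
+ * Illustrative userspace caller (not part of this file): the standard Mach
+ * thread_set_state() interface is what ultimately reaches this function.
+ * `thread' is a placeholder port; the register fields are omitted here.
+ *
+ *	x86_thread_state64_t ts = { 0 };
+ *	thread_set_state(thread, x86_THREAD_STATE64,
+ *	    (thread_state_t)&ts, x86_THREAD_STATE64_COUNT);
+ */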
+
+kern_return_t
+machine_thread_set_state(
+ thread_t thr_act,
+ thread_flavor_t flavor,
+ thread_state_t tstate,
+ mach_msg_type_number_t count)
+{
+ switch (flavor) {
+ case x86_SAVED_STATE32:
+ {
+ x86_saved_state32_t *state;
+ x86_saved_state32_t *saved_state;
+
+ if (count < x86_SAVED_STATE32_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_saved_state32_t *) tstate;
+
+ /*
+ * Refuse to allow 64-bit processes to set
+ * 32-bit state.
+ */
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* Check segment selectors are safe */
+ if (!valid_user_segment_selectors(state->cs,
+ state->ss,
+ state->ds,
+ state->es,
+ state->fs,
+ state->gs)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
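+ /*
+ * Mark the thread's cached user register state as modified before we
+ * rewrite it (pal_register_cache_state() is typically a no-op on
+ * bare-metal builds).
+ */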
+ pal_register_cache_state(thr_act, DIRTY);
+
+ saved_state = USER_REGS32(thr_act);
+
+ /*
+ * General registers
+ */
+ saved_state->edi = state->edi;
+ saved_state->esi = state->esi;
+ saved_state->ebp = state->ebp;
+ saved_state->uesp = state->uesp;
+ saved_state->ebx = state->ebx;
+ saved_state->edx = state->edx;
+ saved_state->ecx = state->ecx;
+ saved_state->eax = state->eax;
+ saved_state->eip = state->eip;
+
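+ /*
+ * Sanitize the user-supplied flags: bits named in EFL_USER_CLEAR are
+ * forced off and bits in EFL_USER_SET are forced on before the flags
+ * are installed.
+ */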
+ saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
+
+ /*
+ * If the trace trap bit is being set, ensure that the user
+ * returns via iret rather than the sysexit fast path; this is
+ * signaled by substituting the SYSENTER_TF_CS selector:
+ */
+ if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
+ state->cs = SYSENTER_TF_CS;
+ }
+
+ /*
+ * The user is setting the segment registers. The code and stack
+ * selectors have already been checked; the others will be reset
+ * by 'iret' if they are not valid.
+ */
+ saved_state->cs = state->cs;
+ saved_state->ss = state->ss;
+ saved_state->ds = state->ds;
+ saved_state->es = state->es;
+ saved_state->fs = state->fs;
+ saved_state->gs = state->gs;
+
+ break;
+ }
+
+ case x86_SAVED_STATE64:
+ {
+ x86_saved_state64_t *state;
+ x86_saved_state64_t *saved_state;
+
+ if (count < x86_SAVED_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_saved_state64_t *) tstate;
+
+ /* Verify that the supplied code segment selector is
+ * valid. In 64-bit mode, the FS and GS segment overrides
+ * use the FS.base and GS.base MSRs to calculate
+ * base addresses, and the trampolines don't directly
+ * restore the segment registers--hence they are no
+ * longer relevant for validation.
+ */
+ if (!valid_user_code_selector(state->isf.cs)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* Check that the PC and stack pointer are canonical addresses */
+ if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
+ !IS_USERADDR64_CANONICAL(state->isf.rip)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ pal_register_cache_state(thr_act, DIRTY);
+
+ saved_state = USER_REGS64(thr_act);
+
+ /*
+ * General registers
+ */
+ saved_state->r8 = state->r8;
+ saved_state->r9 = state->r9;
+ saved_state->r10 = state->r10;
+ saved_state->r11 = state->r11;
+ saved_state->r12 = state->r12;
+ saved_state->r13 = state->r13;
+ saved_state->r14 = state->r14;
+ saved_state->r15 = state->r15;
+ saved_state->rdi = state->rdi;
+ saved_state->rsi = state->rsi;
+ saved_state->rbp = state->rbp;
+ saved_state->rbx = state->rbx;
+ saved_state->rdx = state->rdx;
+ saved_state->rcx = state->rcx;
+ saved_state->rax = state->rax;
+ saved_state->isf.rsp = state->isf.rsp;
+ saved_state->isf.rip = state->isf.rip;
+
+ saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
+
+ /*
+ * The user is setting the segment registers. The code and stack
+ * selectors have already been checked; the others will be reset
+ * on the return to user mode if they are not valid.
+ */
+ saved_state->isf.cs = state->isf.cs;
+ saved_state->isf.ss = state->isf.ss;
+ saved_state->fs = state->fs;
+ saved_state->gs = state->gs;
+
+ break;
+ }
+
+ case x86_FLOAT_STATE32:
+ case x86_AVX_STATE32:
+ case x86_AVX512_STATE32:
+ {
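+ /*
+ * _MachineStateCount[] maps each flavor to its expected state count
+ * (in natural_t words), so a single check covers all three flavors.
+ */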
+ if (count != _MachineStateCount[flavor]) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return fpu_set_fxstate(thr_act, tstate, flavor);
+ }
+
+ case x86_FLOAT_STATE64:
+ case x86_AVX_STATE64:
+ case x86_AVX512_STATE64:
+ {
+ if (count != _MachineStateCount[flavor]) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return fpu_set_fxstate(thr_act, tstate, flavor);
+ }
+
+ case x86_FLOAT_STATE:
+ {
+ x86_float_state_t *state;
+
+ if (count != x86_FLOAT_STATE_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_float_state_t *)tstate;
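+ /*
+ * The generic flavor embeds a header (fsh) naming the concrete
+ * sub-flavor, which must agree with the thread's address width.
+ */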
+ if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
+ thread_is_64bit_addr(thr_act)) {
+ return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
+ }
+ if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
+ !thread_is_64bit_addr(thr_act)) {
+ return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
+ }
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ case x86_AVX_STATE:
+ case x86_AVX512_STATE:
+ {
+ x86_avx_state_t *state;
+
+ if (count != _MachineStateCount[flavor]) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_avx_state_t *)tstate;
+ /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
+ /* 64-bit flavor? */
+ if (state->ash.flavor == (flavor - 1) &&
+ state->ash.count == _MachineStateCount[flavor - 1] &&
+ thread_is_64bit_addr(thr_act)) {
+ return fpu_set_fxstate(thr_act,
+ (thread_state_t)&state->ufs.as64,
+ flavor - 1);
+ }
+ /* 32-bit flavor? */
+ if (state->ash.flavor == (flavor - 2) &&
+ state->ash.count == _MachineStateCount[flavor - 2] &&
+ !thread_is_64bit_addr(thr_act)) {
+ return fpu_set_fxstate(thr_act,
+ (thread_state_t)&state->ufs.as32,
+ flavor - 2);
+ }
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ case x86_THREAD_STATE32:
+ {
+ if (count != x86_THREAD_STATE32_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
+ }
+
+ case x86_THREAD_STATE64:
+ {
+ if (count != x86_THREAD_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return set_thread_state64(thr_act, tstate, FALSE);
+ }
+
+ case x86_THREAD_FULL_STATE64:
+ {
+ if (count != x86_THREAD_FULL_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* If this process does not have a custom LDT, return failure */
+ if (thr_act->task->i386_ldt == 0) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
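+ /*
+ * The full flavor additionally carries segment-selector state, which
+ * this path only accepts for tasks running with a custom LDT.
+ */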
+ return set_thread_state64(thr_act, tstate, TRUE);
+ }
+
+ case x86_THREAD_STATE:
+ {
+ x86_thread_state_t *state;
+
+ if (count != x86_THREAD_STATE_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_thread_state_t *)tstate;
+
+ if (state->tsh.flavor == x86_THREAD_STATE64 &&
+ state->tsh.count == x86_THREAD_STATE64_COUNT &&
+ thread_is_64bit_addr(thr_act)) {
+ return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
+ } else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
+ state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
+ thread_is_64bit_addr(thr_act) && thr_act->task->i386_ldt != 0) {
+ return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
+ } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
+ state->tsh.count == x86_THREAD_STATE32_COUNT &&
+ !thread_is_64bit_addr(thr_act)) {
+ return set_thread_state32(thr_act, &state->uts.ts32);
+ } else {
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
+ case x86_DEBUG_STATE32:
+ {
+ x86_debug_state32_t *state;
+ kern_return_t ret;
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_debug_state32_t *)tstate;
+
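+ /*
+ * set_debug_state32() is expected to sanitize the supplied debug
+ * register values (e.g. the DR7 control bits) before installing them.
+ */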
+ ret = set_debug_state32(thr_act, state);
+
+ return ret;
+ }
+ case x86_DEBUG_STATE64:
+ {
+ x86_debug_state64_t *state;
+ kern_return_t ret;
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_debug_state64_t *)tstate;
+
+ ret = set_debug_state64(thr_act, state);
+
+ return ret;
+ }
+ case x86_DEBUG_STATE:
+ {
+ x86_debug_state_t *state;
+ kern_return_t ret = KERN_INVALID_ARGUMENT;
+
+ if (count != x86_DEBUG_STATE_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_debug_state_t *)tstate;
+ if (state->dsh.flavor == x86_DEBUG_STATE64 &&
+ state->dsh.count == x86_DEBUG_STATE64_COUNT &&
+ thread_is_64bit_addr(thr_act)) {
+ ret = set_debug_state64(thr_act, &state->uds.ds64);
+ } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
+ state->dsh.count == x86_DEBUG_STATE32_COUNT &&
+ !thread_is_64bit_addr(thr_act)) {
+ ret = set_debug_state32(thr_act, &state->uds.ds32);
+ }
+ return ret;
+ }
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return KERN_SUCCESS;
+}
+
+mach_vm_address_t
+machine_thread_pc(thread_t thr_act)
+{
+ if (thread_is_64bit_addr(thr_act)) {
+ return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;
+ } else {
+ return (mach_vm_address_t)USER_REGS32(thr_act)->eip;
+ }
+}
+
+void
+machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
+{
+ pal_register_cache_state(thr_act, DIRTY);
+
+ if (thread_is_64bit_addr(thr_act)) {
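+ /*
+ * A non-canonical target would fault on the transition back to
+ * user mode, so force it to 0 instead.
+ */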
+ if (!IS_USERADDR64_CANONICAL(pc)) {
+ pc = 0;
+ }
+ USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;
+ } else {
+ USER_REGS32(thr_act)->eip = (uint32_t)pc;
+ }
+}
+
+
+/*
+ * machine_thread_get_state (historically thread_getstatus):
+ *
+ * Get the status of the specified thread.
+ */
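+
+/*
+ * Illustrative userspace caller (not part of this file), via the standard
+ * Mach thread_get_state() interface; `thread' is a placeholder port:
+ *
+ *	x86_thread_state64_t ts;
+ *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+ *	thread_get_state(thread, x86_THREAD_STATE64,
+ *	    (thread_state_t)&ts, &count);
+ */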
+
+kern_return_t
+machine_thread_get_state(
+ thread_t thr_act,
+ thread_flavor_t flavor,
+ thread_state_t tstate,
+ mach_msg_type_number_t *count)
+{
+ switch (flavor) {
+ case THREAD_STATE_FLAVOR_LIST:
+ {
+ if (*count < 3) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ tstate[0] = i386_THREAD_STATE;
+ tstate[1] = i386_FLOAT_STATE;
+ tstate[2] = i386_EXCEPTION_STATE;
+
+ *count = 3;
+ break;
+ }
+
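+ /*
+ * The _NEW / _10_9 / _10_13 / _10_15 lists below extend this set with
+ * flavors added in later OS releases (as the suffixes suggest).
+ */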
+ case THREAD_STATE_FLAVOR_LIST_NEW:
+ {
+ if (*count < 4) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ tstate[0] = x86_THREAD_STATE;
+ tstate[1] = x86_FLOAT_STATE;
+ tstate[2] = x86_EXCEPTION_STATE;
+ tstate[3] = x86_DEBUG_STATE;
+
+ *count = 4;
+ break;
+ }
+
+ case THREAD_STATE_FLAVOR_LIST_10_9:
+ {
+ if (*count < 5) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ tstate[0] = x86_THREAD_STATE;
+ tstate[1] = x86_FLOAT_STATE;
+ tstate[2] = x86_EXCEPTION_STATE;
+ tstate[3] = x86_DEBUG_STATE;
+ tstate[4] = x86_AVX_STATE;
+
+ *count = 5;
+ break;
+ }
+
+ case THREAD_STATE_FLAVOR_LIST_10_13:
+ {
+ if (*count < 6) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ tstate[0] = x86_THREAD_STATE;
+ tstate[1] = x86_FLOAT_STATE;
+ tstate[2] = x86_EXCEPTION_STATE;
+ tstate[3] = x86_DEBUG_STATE;
+ tstate[4] = x86_AVX_STATE;
+ tstate[5] = x86_AVX512_STATE;
+
+ *count = 6;
+ break;
+ }
+
+ case THREAD_STATE_FLAVOR_LIST_10_15:
+ {
+ if (*count < 7) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ tstate[0] = x86_THREAD_STATE;
+ tstate[1] = x86_FLOAT_STATE;
+ tstate[2] = x86_EXCEPTION_STATE;
+ tstate[3] = x86_DEBUG_STATE;
+ tstate[4] = x86_AVX_STATE;
+ tstate[5] = x86_AVX512_STATE;
+ tstate[6] = x86_PAGEIN_STATE;
+
+ *count = 7;
+ break;
+ }
+
+ case x86_SAVED_STATE32:
+ {
+ x86_saved_state32_t *state;
+ x86_saved_state32_t *saved_state;
+
+ if (*count < x86_SAVED_STATE32_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_saved_state32_t *) tstate;
+ saved_state = USER_REGS32(thr_act);
+
+ /*
+ * First, copy everything:
+ */
+ *state = *saved_state;
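+ /* Only the low 16 bits of each segment field hold a selector. */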
+ state->ds = saved_state->ds & 0xffff;
+ state->es = saved_state->es & 0xffff;
+ state->fs = saved_state->fs & 0xffff;
+ state->gs = saved_state->gs & 0xffff;
+
+ *count = x86_SAVED_STATE32_COUNT;
+ break;
+ }
+
+ case x86_SAVED_STATE64:
+ {
+ x86_saved_state64_t *state;
+ x86_saved_state64_t *saved_state;
+
+ if (*count < x86_SAVED_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_saved_state64_t *)tstate;
+ saved_state = USER_REGS64(thr_act);
+
+ /*
+ * First, copy everything:
+ */
+ *state = *saved_state;
+ state->ds = saved_state->ds & 0xffff;
+ state->es = saved_state->es & 0xffff;
+ state->fs = saved_state->fs & 0xffff;
+ state->gs = saved_state->gs & 0xffff;
+
+ *count = x86_SAVED_STATE64_COUNT;
+ break;
+ }
+
+ case x86_FLOAT_STATE32:
+ {
+ if (*count < x86_FLOAT_STATE32_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = x86_FLOAT_STATE32_COUNT;
+
+ return fpu_get_fxstate(thr_act, tstate, flavor);
+ }
+
+ case x86_FLOAT_STATE64:
+ {
+ if (*count < x86_FLOAT_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = x86_FLOAT_STATE64_COUNT;
+
+ return fpu_get_fxstate(thr_act, tstate, flavor);
+ }
+
+ case x86_FLOAT_STATE:
+ {
+ x86_float_state_t *state;
+ kern_return_t kret;
+
+ if (*count < x86_FLOAT_STATE_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_float_state_t *)tstate;
+
+ /*
+ * No need to bzero: currently
+ * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT,
+ * so the union is fully overwritten in either case.
+ */
+ if (thread_is_64bit_addr(thr_act)) {
+ state->fsh.flavor = x86_FLOAT_STATE64;
+ state->fsh.count = x86_FLOAT_STATE64_COUNT;
+
+ kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
+ } else {
+ state->fsh.flavor = x86_FLOAT_STATE32;
+ state->fsh.count = x86_FLOAT_STATE32_COUNT;
+
+ kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
+ }
+ *count = x86_FLOAT_STATE_COUNT;
+
+ return kret;
+ }
+
+ case x86_AVX_STATE32:
+ case x86_AVX512_STATE32:
+ {
+ if (*count != _MachineStateCount[flavor]) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = _MachineStateCount[flavor];
+
+ return fpu_get_fxstate(thr_act, tstate, flavor);
+ }
+
+ case x86_AVX_STATE64:
+ case x86_AVX512_STATE64:
+ {
+ if (*count != _MachineStateCount[flavor]) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = _MachineStateCount[flavor];
+
+ return fpu_get_fxstate(thr_act, tstate, flavor);
+ }
+
+ case x86_AVX_STATE:
+ case x86_AVX512_STATE:
+ {
+ x86_avx_state_t *state;
+ thread_state_t fstate;
+
+ if (*count < _MachineStateCount[flavor]) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = _MachineStateCount[flavor];
+ state = (x86_avx_state_t *)tstate;
+
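+ /*
+ * Zero the whole container first; the per-width sub-state filled in
+ * below may be smaller than the generic flavor's count.
+ */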
+ bzero((char *)state, *count * sizeof(int));
+
+ if (thread_is_64bit_addr(thr_act)) {
+ flavor -= 1; /* 64-bit flavor */
+ fstate = (thread_state_t) &state->ufs.as64;
+ } else {
+ flavor -= 2; /* 32-bit flavor */
+ fstate = (thread_state_t) &state->ufs.as32;
+ }
+ state->ash.flavor = flavor;
+ state->ash.count = _MachineStateCount[flavor];
+
+ return fpu_get_fxstate(thr_act, fstate, flavor);
+ }
+
+ case x86_THREAD_STATE32:
+ {
+ if (*count < x86_THREAD_STATE32_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = x86_THREAD_STATE32_COUNT;
+
+ get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
+ break;
+ }
+
+ case x86_THREAD_STATE64:
+ {
+ if (*count < x86_THREAD_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = x86_THREAD_STATE64_COUNT;
+
+ get_thread_state64(thr_act, tstate, FALSE);
+ break;
+ }
+
+ case x86_THREAD_FULL_STATE64:
+ {
+ if (*count < x86_THREAD_FULL_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* If this process does not have a custom LDT, return failure */
+ if (thr_act->task->i386_ldt == 0) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = x86_THREAD_FULL_STATE64_COUNT;
+
+ get_thread_state64(thr_act, tstate, TRUE);
+ break;
+ }
+
+ case x86_THREAD_STATE:
+ {
+ x86_thread_state_t *state;
+
+ if (*count < x86_THREAD_STATE_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_thread_state_t *)tstate;
+
+ bzero((char *)state, sizeof(x86_thread_state_t));
+
+ if (thread_is_64bit_addr(thr_act)) {
+ state->tsh.flavor = x86_THREAD_STATE64;
+ state->tsh.count = x86_THREAD_STATE64_COUNT;
+
+ get_thread_state64(thr_act, &state->uts.ts64, FALSE);
+ } else {
+ state->tsh.flavor = x86_THREAD_STATE32;
+ state->tsh.count = x86_THREAD_STATE32_COUNT;
+
+ get_thread_state32(thr_act, &state->uts.ts32);
+ }
+ *count = x86_THREAD_STATE_COUNT;
+
+ break;
+ }
+
+
+ case x86_EXCEPTION_STATE32:
+ {
+ if (*count < x86_EXCEPTION_STATE32_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = x86_EXCEPTION_STATE32_COUNT;
+
+ get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
+ /*
+ * Suppress the cpu number for binary compatibility
+ * of this deprecated state.
+ */
+ ((x86_exception_state32_t *)tstate)->cpu = 0;
+ break;
+ }
+
+ case x86_EXCEPTION_STATE64:
+ {
+ if (*count < x86_EXCEPTION_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *count = x86_EXCEPTION_STATE64_COUNT;
+
+ get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
+ /*
+ * Suppress the cpu number for binary compatibility
+ * of this deprecated state.
+ */
+ ((x86_exception_state64_t *)tstate)->cpu = 0;
+ break;
+ }
+
+ case x86_EXCEPTION_STATE:
+ {
+ x86_exception_state_t *state;
+
+ if (*count < x86_EXCEPTION_STATE_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state = (x86_exception_state_t *)tstate;
+
+ bzero((char *)state, sizeof(x86_exception_state_t));
+
+ if (thread_is_64bit_addr(thr_act)) {
+ state->esh.flavor = x86_EXCEPTION_STATE64;
+ state->esh.count = x86_EXCEPTION_STATE64_COUNT;
+
+ get_exception_state64(thr_act, &state->ues.es64);
+ } else {
+ state->esh.flavor = x86_EXCEPTION_STATE32;
+ state->esh.count = x86_EXCEPTION_STATE32_COUNT;
+
+ get_exception_state32(thr_act, &state->ues.es32);
+ }
+ *count = x86_EXCEPTION_STATE_COUNT;
+
+ break;
+ }
+ case x86_DEBUG_STATE32:
+ {
+ if (*count < x86_DEBUG_STATE32_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);
+
+ *count = x86_DEBUG_STATE32_COUNT;
+
+ break;
+ }
+ case x86_DEBUG_STATE64:
+ {
+ if (*count < x86_DEBUG_STATE64_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (!thread_is_64bit_addr(thr_act)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);
+
+ *count = x86_DEBUG_STATE64_COUNT;
+
+ break;
+ }
+ case x86_DEBUG_STATE:
+ {
+ x86_debug_state_t *state;