diff --git a/osfmk/i386/bsd_i386.c b/osfmk/i386/bsd_i386.c
index 4b933d763f49aa080553c8bb5d4cf7ed558fbb84..805cbc1de6dc51d6c121ae03c7b108de1794ba16 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -26,7 +26,6 @@
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifdef MACH_BSD
-#include <mach_rt.h>
 #include <mach_debug.h>
 #include <mach_ldebug.h>
 
 
 #ifdef MACH_BSD
 extern void    mach_kauth_cred_uthread_update(void);
+extern void throttle_lowpri_io(int);
 #endif
 
-kern_return_t
-thread_userstack(
-    thread_t,
-    int,
-    thread_state_t,
-    unsigned int,
-    mach_vm_offset_t *,
-       int *
-);
-
-kern_return_t
-thread_entrypoint(
-    thread_t,
-    int,
-    thread_state_t,
-    unsigned int,
-    mach_vm_offset_t *
-); 
-
 void * find_user_regs(thread_t);
 
 unsigned int get_msr_exportmask(void);
@@ -100,9 +81,6 @@ unsigned int get_msr_nbits(void);
 
 unsigned int get_msr_rbits(void);
 
-extern void throttle_lowpri_io(boolean_t);
-
-
 /*
  * thread_userstack:
  *
@@ -115,8 +93,9 @@ thread_userstack(
     int                 flavor,
     thread_state_t      tstate,
     __unused unsigned int        count,
-    user_addr_t    *user_stack,
-       int                                     *customstack
+    mach_vm_offset_t    *user_stack,
+    int                 *customstack,
+    __unused boolean_t  is64bit
 )
 {
        if (customstack)
@@ -129,14 +108,15 @@ thread_userstack(
 
                        state25 = (x86_thread_state32_t *) tstate;
 
-                       if (state25->esp)
+                       if (state25->esp) {
                                *user_stack = state25->esp;
-                       else 
+                               if (customstack)
+                                       *customstack = 1;
+                       } else {
                                *user_stack = VM_USRSTACK32;
-                       if (customstack && state25->esp)
-                               *customstack = 1;
-                       else
-                               *customstack = 0;
+                               if (customstack)
+                                       *customstack = 0;
+                       }
                        break;
                }
 
@@ -146,14 +126,15 @@ thread_userstack(
 
                        state25 = (x86_thread_state64_t *) tstate;
 
-                       if (state25->rsp)
+                       if (state25->rsp) {
                                *user_stack = state25->rsp;
-                       else 
+                               if (customstack)
+                                       *customstack = 1;
+                       } else {
                                *user_stack = VM_USRSTACK64;
-                       if (customstack && state25->rsp)
-                               *customstack = 1;
-                       else
-                               *customstack = 0;
+                               if (customstack)
+                                       *customstack = 0;
+                       }
                        break;
                }
 
@@ -164,6 +145,24 @@ thread_userstack(
        return (KERN_SUCCESS);
 }
 
+/*
+ * thread_userstackdefault:
+ *
+ * Return the default stack location for the
+ * thread, if otherwise unknown.
+ */
+kern_return_t
+thread_userstackdefault(
+       mach_vm_offset_t *default_user_stack,
+       boolean_t is64bit)
+{
+       if (is64bit) {
+               *default_user_stack = VM_USRSTACK64;
+       } else {
+               *default_user_stack = VM_USRSTACK32;
+       }
+       return (KERN_SUCCESS);
+}
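A minimal caller sketch, hypothetical and not part of this diff, showing how the two routines above fit together (it assumes thread, flavor, tstate and count are already in scope): thread_userstack() reports whether the incoming register state already names a stack, and thread_userstackdefault() supplies the fallback base when it does not.

	mach_vm_offset_t stack = 0;
	int customstack = 0;

	kern_return_t kr = thread_userstack(thread, flavor, tstate, count,
	                                    &stack, &customstack,
	                                    thread_is_64bit_addr(thread));
	if (kr == KERN_SUCCESS && !customstack) {
	        /* no stack named in the incoming state: use the default base
	         * (VM_USRSTACK32/VM_USRSTACK64) and let the caller map it */
	        thread_userstackdefault(&stack, thread_is_64bit_addr(thread));
	}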
 
 kern_return_t
 thread_entrypoint(
@@ -202,62 +201,6 @@ thread_entrypoint(
        return (KERN_SUCCESS);
 }
 
-/*
- * Duplicate parent state in child
- * for U**X fork.
- */
-kern_return_t
-machine_thread_dup(
-    thread_t           parent,
-    thread_t           child
-)
-{
-       
-       pcb_t           parent_pcb;
-       pcb_t           child_pcb;
-
-       if ((child_pcb = child->machine.pcb) == NULL ||
-           (parent_pcb = parent->machine.pcb) == NULL)
-               return (KERN_FAILURE);
-       /*
-        * Copy over the x86_saved_state registers
-        */
-       if (cpu_mode_is64bit()) {
-               if (thread_is_64bit(parent))
-                       bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
-               else
-                       bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
-       } else
-               bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));
-
-       /*
-        * Check to see if parent is using floating point
-        * and if so, copy the registers to the child
-        */
-       fpu_dup_fxstate(parent, child);
-
-#ifdef MACH_BSD
-       /*
-        * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
-        */
-       child_pcb->cthread_self = parent_pcb->cthread_self;
-       if (!thread_is_64bit(parent))
-               child_pcb->cthread_desc = parent_pcb->cthread_desc;
-
-       /*
-        * FIXME - should a user specified LDT, TSS and V86 info
-        * be duplicated as well?? - probably not.
-        */
-       // duplicate any use LDT entry that was set I think this is appropriate.
-        if (parent_pcb->uldt_selector!= 0) {
-               child_pcb->uldt_selector = parent_pcb->uldt_selector;
-               child_pcb->uldt_desc = parent_pcb->uldt_desc;
-       }
-#endif
-
-       return (KERN_SUCCESS);
-}
-
 /* 
  * FIXME - thread_set_child
  */
@@ -266,8 +209,9 @@ void thread_set_child(thread_t child, int pid);
 void
 thread_set_child(thread_t child, int pid)
 {
+       pal_register_cache_state(child, DIRTY);
 
-       if (thread_is_64bit(child)) {
+       if (thread_is_64bit_addr(child)) {
                x86_saved_state64_t     *iss64;
 
                iss64 = USER_REGS64(child);
@@ -287,31 +231,6 @@ thread_set_child(thread_t child, int pid)
 }
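For context, a sketch of the return convention this primes, with the user-space side stated as an assumption rather than taken from this diff: thread_set_child() (and the removed thread_set_parent() below) set up fork-style two-way returns in the saved registers.

	/* child thread (set here):                   rax/eax = pid, rdx/edx = 1, CF cleared
	 * parent thread (removed thread_set_parent): rax/eax = pid, rdx/edx = 0, CF cleared
	 * the user-mode fork stub is assumed to test edx/rdx and rewrite the child's
	 * return value to 0 before fork() returns. */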
 
 
-void thread_set_parent(thread_t parent, int pid);
-
-void
-thread_set_parent(thread_t parent, int pid)
-{
-
-       if (thread_is_64bit(parent)) {
-               x86_saved_state64_t     *iss64;
-
-               iss64 = USER_REGS64(parent);
-
-               iss64->rax = pid;
-               iss64->rdx = 0;
-               iss64->isf.rflags &= ~EFL_CF;
-       } else {
-               x86_saved_state32_t     *iss32;
-
-               iss32 = USER_REGS32(parent);
-
-               iss32->eax = pid;
-               iss32->edx = 0;
-               iss32->efl &= ~EFL_CF;
-       }
-}
-
 
 /*
  * System Call handling code
@@ -319,15 +238,14 @@ thread_set_parent(thread_t parent, int pid)
 
 extern long fuword(vm_offset_t);
 
-
-
+__attribute__((noreturn))
 void
 machdep_syscall(x86_saved_state_t *state)
 {
        int                     args[machdep_call_count];
        int                     trapno;
        int                     nargs;
-       machdep_call_t          *entry;
+       const machdep_call_t    *entry;
        x86_saved_state32_t     *regs;
 
        assert(is_saved_state32(state));
@@ -393,23 +311,27 @@ machdep_syscall(x86_saved_state_t *state)
        default:
                panic("machdep_syscall: too many args");
        }
-       if (current_thread()->funnel_lock)
-               (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
 
        DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);
 
-       throttle_lowpri_io(TRUE);
+#if DEBUG || DEVELOPMENT
+       kern_allocation_name_t
+       prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
+       assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
+#endif /* DEBUG || DEVELOPMENT */
+
+       throttle_lowpri_io(1);
 
        thread_exception_return();
        /* NOTREACHED */
 }
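The new DEBUG/DEVELOPMENT block asserts that any per-thread allocation tag was cleared before the trap returns to user space. A hedged sketch of the pairing it enforces; the allocate/set calls are assumptions inferred from the names in the assert, not part of this diff:

	kern_allocation_name_t tag = kern_allocation_name_allocate("example.tag", 0);

	thread_set_allocation_name(tag);   /* subsequent allocations attributed to the tag */
	/* ... kernel work that allocates memory ... */
	thread_set_allocation_name(NULL);  /* must be cleared, or the assert fires on the
	                                      way back to user space */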
 
-
+__attribute__((noreturn))
 void
 machdep_syscall64(x86_saved_state_t *state)
 {
        int                     trapno;
-       machdep_call_t          *entry;
+       const machdep_call_t    *entry;
        x86_saved_state64_t     *regs;
 
        assert(is_saved_state64(state));
@@ -435,156 +357,27 @@ machdep_syscall64(x86_saved_state_t *state)
        case 1:
                regs->rax = (*entry->routine.args64_1)(regs->rdi);
                break;
+       case 2:
+               regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
+               break;
        default:
                panic("machdep_syscall64: too many args");
        }
-       if (current_thread()->funnel_lock)
-               (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
 
        DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);
 
-       throttle_lowpri_io(TRUE);
+#if DEBUG || DEVELOPMENT
+       kern_allocation_name_t
+       prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
+       assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
+#endif /* DEBUG || DEVELOPMENT */
+
+       throttle_lowpri_io(1);
 
        thread_exception_return();
        /* NOTREACHED */
 }
 
-/*
- * thread_fast_set_cthread_self: Sets the machine kernel thread ID of the
- * current thread to the given thread ID; fast version for 32-bit processes
- *
- * Parameters:    self                    Thread ID to set
- *                
- * Returns:        0                      Success
- *                !0                      Not success
- */
-kern_return_t
-thread_fast_set_cthread_self(uint32_t self)
-{
-       thread_t thread = current_thread();
-       pcb_t pcb = thread->machine.pcb;
-       struct real_descriptor desc = {
-               .limit_low = 1,
-               .limit_high = 0,
-               .base_low = self & 0xffff,
-               .base_med = (self >> 16) & 0xff,
-               .base_high = (self >> 24) & 0xff,
-               .access = ACC_P|ACC_PL_U|ACC_DATA_W,
-               .granularity = SZ_32|SZ_G,
-       };
-
-       current_thread()->machine.pcb->cthread_self = (uint64_t) self;  /* preserve old func too */
-
-       /* assign descriptor */
-       mp_disable_preemption();
-       pcb->cthread_desc = desc;
-       *ldt_desc_p(USER_CTHREAD) = desc;
-       saved_state32(pcb->iss)->gs = USER_CTHREAD;
-       mp_enable_preemption();
-
-       return (USER_CTHREAD);
-}
-
-/*
- * thread_fast_set_cthread_self64: Sets the machine kernel thread ID of the
- * current thread to the given thread ID; fast version for 64-bit processes 
- *
- * Parameters:    self                    Thread ID
- *                
- * Returns:        0                      Success
- *                !0                      Not success
- */
-kern_return_t
-thread_fast_set_cthread_self64(uint64_t self)
-{
-       pcb_t pcb = current_thread()->machine.pcb;
-       cpu_data_t              *cdp;
-
-       /* check for canonical address, set 0 otherwise  */
-       if (!IS_USERADDR64_CANONICAL(self))
-               self = 0ULL;
-
-       pcb->cthread_self = self;
-       mp_disable_preemption();
-       cdp = current_cpu_datap();
-#if defined(__x86_64__)
-       if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
-           (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE)))
-               wrmsr64(MSR_IA32_KERNEL_GS_BASE, self);
-#endif
-       cdp->cpu_uber.cu_user_gs_base = self;
-       mp_enable_preemption();
-       return (USER_CTHREAD);
-}
-
-/*
- * thread_set_user_ldt routine is the interface for the user level
- * settable ldt entry feature.  allowing a user to create arbitrary
- * ldt entries seems to be too large of a security hole, so instead
- * this mechanism is in place to allow user level processes to have
- * an ldt entry that can be used in conjunction with the FS register.
- *
- * Swapping occurs inside the pcb.c file along with initialization
- * when a thread is created. The basic functioning theory is that the
- * pcb->uldt_selector variable will contain either 0 meaning the
- * process has not set up any entry, or the selector to be used in
- * the FS register. pcb->uldt_desc contains the actual descriptor the
- * user has set up stored in machine usable ldt format.
- *
- * Currently one entry is shared by all threads (USER_SETTABLE), but
- * this could be changed in the future by changing how this routine
- * allocates the selector. There seems to be no real reason at this
- * time to have this added feature, but in the future it might be
- * needed.
- *
- * address is the linear address of the start of the data area size
- * is the size in bytes of the area flags should always be set to 0
- * for now. in the future it could be used to set R/W permisions or
- * other functions. Currently the segment is created as a data segment
- * up to 1 megabyte in size with full read/write permisions only.
- *
- * this call returns the segment selector or -1 if any error occurs
- */
-kern_return_t
-thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
-{
-       pcb_t pcb;
-       struct fake_descriptor temp;
-       int mycpu;
-
-       if (flags != 0)
-               return -1;              // flags not supported
-       if (size > 0xFFFFF)
-               return -1;              // size too big, 1 meg is the limit
-
-       mp_disable_preemption();
-       mycpu = cpu_number();
-
-       // create a "fake" descriptor so we can use fix_desc()
-       // to build a real one...
-       //   32 bit default operation size
-       //   standard read/write perms for a data segment
-       pcb = (pcb_t)current_thread()->machine.pcb;
-       temp.offset = address;
-       temp.lim_or_seg = size;
-       temp.size_or_wdct = SZ_32;
-       temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;
-
-       // turn this into a real descriptor
-       fix_desc(&temp,1);
-
-       // set up our data in the pcb
-       pcb->uldt_desc = *(struct real_descriptor*)&temp;
-       pcb->uldt_selector = USER_SETTABLE;             // set the selector value
-
-       // now set it up in the current table...
-       *ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor*)&temp;
-
-       mp_enable_preemption();
-
-       return USER_SETTABLE;
-}
-
 #endif /* MACH_BSD */
 
 
@@ -603,37 +396,19 @@ struct mach_call_args {
 };
 
 static kern_return_t
-mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);
+mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);
 
 
 static kern_return_t
-mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
+mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
 {
-       unsigned int args32[9];
-
-       if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
+       if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int)))
                return KERN_INVALID_ARGUMENT;
-
-       switch (nargs) {
-       case 9: args->arg9 = args32[8];
-       case 8: args->arg8 = args32[7];
-       case 7: args->arg7 = args32[6];
-       case 6: args->arg6 = args32[5];
-       case 5: args->arg5 = args32[4];
-       case 4: args->arg4 = args32[3];
-       case 3: args->arg3 = args32[2];
-       case 2: args->arg2 = args32[1];
-       case 1: args->arg1 = args32[0];
-       }
-       if (call_number == 90) {
-               /* munge_l for mach_wait_until_trap() */
-               args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1]))<<32)));
-       }
-       if (call_number == 93) {
-               /* munge_wl for mk_timer_arm_trap() */
-               args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2]))<<32)));
-       }
-
+#if CONFIG_REQUIRES_U32_MUNGING
+       trapp->mach_trap_arg_munge32(args);
+#else
+#error U32 mach traps on x86_64 kernel requires munging
+#endif
        return KERN_SUCCESS;
 }
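mach_call_arg_munger32() now copies mach_trap_u32_words packed 32-bit words straight into the args structure and defers widening to the per-trap munge routine. A minimal sketch of what such a routine does, modeled on the munge_* helpers; the name and argument pattern are illustrative assumptions:

	/* Widen three packed 32-bit words into three 64-bit argument slots,
	 * walking backwards so the expansion can be done in place. */
	static void
	munge_www(void *args)
	{
	        volatile uint64_t *out = (volatile uint64_t *)args;
	        volatile uint32_t *in  = (volatile uint32_t *)args;

	        out[2] = in[2];
	        out[1] = in[1];
	        out[0] = in[0];
	}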
 
@@ -642,6 +417,7 @@ __private_extern__ void mach_call_munger(x86_saved_state_t *state);
 
 extern const char *mach_syscall_name_table[];
 
+__attribute__((noreturn))
 void
 mach_call_munger(x86_saved_state_t *state)
 {
@@ -652,6 +428,9 @@ mach_call_munger(x86_saved_state_t *state)
        struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        x86_saved_state32_t     *regs;
 
+       struct uthread *ut = get_bsdthread_info(current_thread());
+       uthread_reset_proc_refcount(ut);
+
        assert(is_saved_state32(state));
        regs = saved_state32(state);
 
@@ -679,7 +458,7 @@ mach_call_munger(x86_saved_state_t *state)
 
        argc = mach_trap_table[call_number].mach_trap_arg_count;
        if (argc) {
-               retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
+               retval = mach_call_arg_munger32(regs->uesp, &args,  &mach_trap_table[call_number]);
                if (retval != KERN_SUCCESS) {
                        regs->eax = retval;
 
@@ -694,18 +473,34 @@ mach_call_munger(x86_saved_state_t *state)
 #ifdef MACH_BSD
        mach_kauth_cred_uthread_update();
 #endif
-       KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
-                       args.arg1, args.arg2, args.arg3, args.arg4, 0);
+
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+               MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
+               args.arg1, args.arg2, args.arg3, args.arg4, 0);
 
        retval = mach_call(&args);
 
        DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);
 
-       KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
-                       retval, 0, 0, 0, 0);
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+               MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
+               retval, 0, 0, 0, 0);
+
        regs->eax = retval;
 
-       throttle_lowpri_io(TRUE);
+#if DEBUG || DEVELOPMENT
+       kern_allocation_name_t
+       prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
+       assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
+#endif /* DEBUG || DEVELOPMENT */
+
+       throttle_lowpri_io(1);
+
+#if PROC_REF_DEBUG
+       if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
+               panic("system call returned with uu_proc_refcount != 0");
+       }
+#endif
 
        thread_exception_return();
        /* NOTREACHED */
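The PROC_REF_DEBUG bracket, reset at trap entry and checked just above, catches proc references leaked during the trap. A hypothetical leak it would flag; the proc_find()/proc_rele() usage is illustrative, not from this diff:

	proc_t p = proc_find(some_pid);   /* takes a reference; under PROC_REF_DEBUG this
	                                     bumps the uthread's uu_proc_refcount */
	if (p == PROC_NULL)
	        return KERN_FAILURE;
	/* ... work ...
	 * missing proc_rele(p): uu_proc_refcount stays non-zero and the
	 * check above panics when the trap returns. */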
@@ -714,14 +509,19 @@ mach_call_munger(x86_saved_state_t *state)
 
 __private_extern__ void mach_call_munger64(x86_saved_state_t *regs);
 
+__attribute__((noreturn))
 void
 mach_call_munger64(x86_saved_state_t *state)
 {
        int call_number;
        int argc;
        mach_call_t mach_call;
+       struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        x86_saved_state64_t     *regs;
 
+       struct uthread *ut = get_bsdthread_info(current_thread());
+       uthread_reset_proc_refcount(ut);
+
        assert(is_saved_state64(state));
        regs = saved_state64(state);
 
@@ -731,10 +531,9 @@ mach_call_munger64(x86_saved_state_t *state)
                "mach_call_munger64: code=%d(%s)\n",
                call_number, mach_syscall_name_table[call_number]);
 
-       KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
-                                          (call_number)) | DBG_FUNC_START,
-                             regs->rdi, regs->rsi,
-                             regs->rdx, regs->r10, 0);
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+               MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_START,
+               regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);
        
        if (call_number < 0 || call_number >= mach_trap_count) {
                i386_exception(EXC_SYSCALL, regs->rax, 1);
@@ -747,17 +546,23 @@ mach_call_munger64(x86_saved_state_t *state)
                /* NOTREACHED */
        }
        argc = mach_trap_table[call_number].mach_trap_arg_count;
+       if (argc) {
+               int args_in_regs = MIN(6, argc);
+
+               memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));
 
-       if (argc > 6) {
+               if (argc > 6) {
                int copyin_count;
 
-               copyin_count = (argc - 6) * (int)sizeof(uint64_t);
+                       assert(argc <= 9);
+                       copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);
 
-               if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
+               if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
                        regs->rax = KERN_INVALID_ARGUMENT;
                        
-                       thread_exception_return();
-                       /* NOTREACHED */
+                               thread_exception_return();
+                               /* NOTREACHED */
+                       }
                }
        }
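Worked example of the argument gathering above for a trap with mach_trap_arg_count == 8; the register order mirrors the layout of x86_saved_state64_t, which is what the single memcpy relies on:

	/* args_in_regs = MIN(6, 8) = 6
	 *   arg1..arg6 <- regs->rdi, rsi, rdx, r10, r8, r9  (one memcpy of 6 * 8 bytes)
	 * copyin_count = (8 - 6) * sizeof(syscall_arg_t) = 16 bytes
	 *   arg7..arg8 <- user stack at rsp + sizeof(user_addr_t), i.e. just past what
	 *                 is assumed to be the caller's return address. */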
 
@@ -765,15 +570,27 @@ mach_call_munger64(x86_saved_state_t *state)
        mach_kauth_cred_uthread_update();
 #endif
 
-       regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));
+       regs->rax = (uint64_t)mach_call((void *)&args);
        
        DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);
 
-       KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
-                                          (call_number)) | DBG_FUNC_END,
-                             regs->rax, 0, 0, 0, 0);
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
+               MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END, 
+               regs->rax, 0, 0, 0, 0);
 
-       throttle_lowpri_io(TRUE);
+#if DEBUG || DEVELOPMENT
+       kern_allocation_name_t
+       prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
+       assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
+#endif /* DEBUG || DEVELOPMENT */
+
+       throttle_lowpri_io(1);
+
+#if PROC_REF_DEBUG
+       if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
+               panic("system call returned with uu_proc_refcount != 0");
+       }
+#endif
 
        thread_exception_return();
        /* NOTREACHED */
@@ -791,7 +608,8 @@ thread_setuserstack(
        thread_t        thread,
        mach_vm_address_t       user_stack)
 {
-       if (thread_is_64bit(thread)) {
+       pal_register_cache_state(thread, DIRTY);
+       if (thread_is_64bit_addr(thread)) {
                x86_saved_state64_t     *iss64;
 
                iss64 = USER_REGS64(thread);
@@ -817,7 +635,8 @@ thread_adjuserstack(
        thread_t        thread,
        int             adjust)
 {
-       if (thread_is_64bit(thread)) {
+       pal_register_cache_state(thread, DIRTY);
+       if (thread_is_64bit_addr(thread)) {
                x86_saved_state64_t     *iss64;
 
                iss64 = USER_REGS64(thread);
@@ -845,7 +664,8 @@ thread_adjuserstack(
 void
 thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
 {
-       if (thread_is_64bit(thread)) {
+       pal_register_cache_state(thread, DIRTY);
+       if (thread_is_64bit_addr(thread)) {
                x86_saved_state64_t     *iss64;
 
                iss64 = USER_REGS64(thread);
@@ -864,7 +684,8 @@ thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
 kern_return_t
 thread_setsinglestep(thread_t thread, int on)
 {
-       if (thread_is_64bit(thread)) {
+       pal_register_cache_state(thread, DIRTY);
+       if (thread_is_64bit_addr(thread)) {
                x86_saved_state64_t     *iss64;
 
                iss64 = USER_REGS64(thread);
@@ -891,30 +712,22 @@ thread_setsinglestep(thread_t thread, int on)
        return (KERN_SUCCESS);
 }
 
-
-
-/* XXX this should be a struct savearea so that CHUD will work better on x86 */
 void *
-find_user_regs(thread_t thread)
+get_user_regs(thread_t th)
 {
-       return USER_STATE(thread);
+       pal_register_cache_state(th, DIRTY);
+       return(USER_STATE(th));
 }
 
 void *
-get_user_regs(thread_t th)
+find_user_regs(thread_t thread)
 {
-       if (th->machine.pcb)
-               return(USER_STATE(th));
-       else {
-               printf("[get_user_regs: thread does not have pcb]");
-               return NULL;
-       }
+       return get_user_regs(thread);
 }
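A consolidated sketch of the pattern these hunks repeat (the intent is inferred, and the field names are taken from the surrounding code): any routine about to touch a thread's saved user register state first marks the PAL register cache DIRTY, then edits USER_REGS32/64 or USER_STATE.

	pal_register_cache_state(thread, DIRTY);   /* saved state is about to change */
	if (thread_is_64bit_addr(thread)) {
	        x86_saved_state64_t *iss64 = USER_REGS64(thread);
	        iss64->isf.rsp = user_stack;       /* e.g. thread_setuserstack */
	} else {
	        x86_saved_state32_t *iss32 = USER_REGS32(thread);
	        iss32->uesp = (unsigned int)user_stack;
	}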
 
 #if CONFIG_DTRACE
 /*
  * DTrace would like to have a peek at the kernel interrupt state, if available.
- * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
  */
 x86_saved_state_t *find_kern_regs(thread_t);