X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..e8c3f78193f1895ea514044358b93b1add9322f3:/bsd/dev/i386/unix_signal.c

diff --git a/bsd/dev/i386/unix_signal.c b/bsd/dev/i386/unix_signal.c
index eb96e879e..88e615b8b 100644
--- a/bsd/dev/i386/unix_signal.c
+++ b/bsd/dev/i386/unix_signal.c
@@ -46,6 +46,9 @@
 #include
 #include
 #include
+
+#include
+
 #include    /* for thread_abort_safely */
 #include
 
@@ -53,17 +56,16 @@
 #include
 #include
 #include
+#include
 
-#include
+#include
+#include
 
 #include
 
-
 /* Forward: */
-extern boolean_t machine_exception(int, mach_exception_code_t,
-    mach_exception_subcode_t, int *, mach_exception_subcode_t *);
-extern kern_return_t thread_getstatus(register thread_t act, int flavor,
+extern kern_return_t thread_getstatus(thread_t act, int flavor,
     thread_state_t tstate, mach_msg_type_number_t *count);
 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
     thread_state_t tstate, mach_msg_type_number_t count);
 
@@ -98,13 +100,37 @@ struct sigframe32 {
     int             sig;
     user32_addr_t   sinfo;      /* siginfo32_t* */
     user32_addr_t   uctx;       /* struct ucontext32 */
+    user32_addr_t   token;
+};
+
+/*
+ * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
+ * for the cases of extended states (plain FP, or AVX):
+ */
+typedef struct {
+    int flavor; natural_t state_count; size_t mcontext_size;
+} xstate_info_t;
+static const xstate_info_t thread_state64[] = {
+    [FP] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64) },
+    [AVX] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64) },
+#if !defined(RC_HIDE_XNU_J137)
+    [AVX512] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) }
+#endif
+};
+static const xstate_info_t thread_state32[] = {
+    [FP] = { x86_FLOAT_STATE32, x86_FLOAT_STATE32_COUNT, sizeof(struct mcontext32) },
+    [AVX] = { x86_AVX_STATE32, x86_AVX_STATE32_COUNT, sizeof(struct mcontext_avx32) },
+#if !defined(RC_HIDE_XNU_J137)
+    [AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) }
+#endif
 };
 
 /*
  * NOTE: Source and target may *NOT* overlap!
+ * XXX: Unify with bsd/kern/kern_exit.c
  */
 static void
-siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
+siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out)
 {
     out->si_signo = in->si_signo;
     out->si_errno = in->si_errno;
@@ -120,7 +146,7 @@ siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
 }
 
 static void
-siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
+siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out)
 {
     out->si_signo = in->si_signo;
     out->si_errno = in->si_errno;
@@ -137,10 +163,15 @@ siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
 void
 sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
 {
-    union {
-        struct mcontext32 mctx32;
-        struct mcontext64 mctx64;
-    } mctx;
+    union {
+        struct mcontext_avx32       mctx_avx32;
+        struct mcontext_avx64       mctx_avx64;
+#if !defined(RC_HIDE_XNU_J137)
+        struct mcontext_avx512_32   mctx_avx512_32;
+        struct mcontext_avx512_64   mctx_avx512_64;
+#endif
+    } mctx_store, *mctxp = &mctx_store;
+
     user_addr_t ua_sp;
     user_addr_t ua_fp;
     user_addr_t ua_cr2;
@@ -160,7 +191,10 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
     struct uthread * ut;
     int stack_size = 0;
     int infostyle = UC_TRAD;
-
+    xstate_t        sig_xstate;
+    user_addr_t     token_uctx;
+    kern_return_t   kr;
+
     thread = current_thread();
     ut = get_bsdthread_info(thread);
 
@@ -178,31 +212,35 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
 
     bzero((caddr_t)&sinfo64, sizeof(sinfo64));
     sinfo64.si_signo = sig;
-
+
+    bzero(mctxp, sizeof(*mctxp));
+
+    sig_xstate = current_xstate();
     if (proc_is64bit(p)) {
         x86_thread_state64_t    *tstate64;
         struct user_ucontext64  uctx64;
+        user64_addr_t token;
 
         flavor = x86_THREAD_STATE64;
         state_count = x86_THREAD_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.ss;
+        state = (void *)&mctxp->mctx_avx64.ss;
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        flavor = x86_FLOAT_STATE64;
-        state_count = x86_FLOAT_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.fs;
+        flavor = thread_state64[sig_xstate].flavor;
+        state_count = thread_state64[sig_xstate].state_count;
+        state = (void *)&mctxp->mctx_avx64.fs;
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
         flavor = x86_EXCEPTION_STATE64;
         state_count = x86_EXCEPTION_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.es;
+        state = (void *)&mctxp->mctx_avx64.es;
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        tstate64 = &mctx.mctx64.ss;
+        tstate64 = &mctxp->mctx_avx64.ss;
 
         /* figure out where our new stack lives */
         if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
@@ -214,7 +252,7 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         } else {
             ua_sp = tstate64->rsp;
         }
-        ua_cr2 = mctx.mctx64.es.faultvaddr;
+        ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;
 
         /* The x86_64 ABI defines a 128-byte red zone. */
         ua_sp -= C_64_REDZONE_LEN;
@@ -225,7 +263,7 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         ua_sp -= sizeof (user64_siginfo_t);
         ua_sip = ua_sp;
 
-        ua_sp -= sizeof (struct mcontext64);
+        ua_sp -= thread_state64[sig_xstate].mcontext_size;
         ua_mctxp = ua_sp;
 
         /*
@@ -240,6 +278,14 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         */
        ua_fp -= sizeof(user_addr_t);
 
+       /*
+        * Generate the validation token for sigreturn
+        */
+       token_uctx = ua_uctxp;
+       kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
+       assert(kr == KERN_SUCCESS);
+       token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
+
        /*
         * Build the signal context to be used by sigreturn.
         */
@@ -254,13 +300,13 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
             uctx64.uc_stack.ss_flags |= SS_ONSTACK;
         uctx64.uc_link = 0;
 
-        uctx64.uc_mcsize = sizeof(struct mcontext64);
+        uctx64.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
         uctx64.uc_mcontext64 = ua_mctxp;
 
         if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
             goto bad;
 
-        if (copyout((caddr_t)&mctx.mctx64, ua_mctxp, sizeof (struct mcontext64)))
+        if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size))
             goto bad;
 
         sinfo64.pad[0] = tstate64->rsp;
@@ -285,31 +331,32 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         tstate64->rdx = sig;
         tstate64->rcx = ua_sip;
         tstate64->r8  = ua_uctxp;
-
+        tstate64->r9  = token;
     } else {
         x86_thread_state32_t    *tstate32;
         struct user_ucontext32  uctx32;
         struct sigframe32       frame32;
+        user32_addr_t token;
 
         flavor = x86_THREAD_STATE32;
         state_count = x86_THREAD_STATE32_COUNT;
-        state = (void *)&mctx.mctx32.ss;
+        state = (void *)&mctxp->mctx_avx32.ss;
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        flavor = x86_FLOAT_STATE32;
-        state_count = x86_FLOAT_STATE32_COUNT;
-        state = (void *)&mctx.mctx32.fs;
+        flavor = thread_state32[sig_xstate].flavor;
+        state_count = thread_state32[sig_xstate].state_count;
+        state = (void *)&mctxp->mctx_avx32.fs;
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
         flavor = x86_EXCEPTION_STATE32;
         state_count = x86_EXCEPTION_STATE32_COUNT;
-        state = (void *)&mctx.mctx32.es;
+        state = (void *)&mctxp->mctx_avx32.es;
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        tstate32 = &mctx.mctx32.ss;
+        tstate32 = &mctxp->mctx_avx32.ss;
 
         /* figure out where our new stack lives */
         if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
@@ -321,7 +368,7 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         } else {
             ua_sp = tstate32->esp;
         }
-        ua_cr2 = mctx.mctx32.es.faultvaddr;
+        ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;
 
         ua_sp -= sizeof (struct user_ucontext32);
         ua_uctxp = ua_sp;            // someone tramples the first word!
@@ -329,7 +376,7 @@
         ua_sp -= sizeof (user32_siginfo_t);
         ua_sip = ua_sp;
 
-        ua_sp -= sizeof (struct mcontext32);
+        ua_sp -= thread_state32[sig_xstate].mcontext_size;
         ua_mctxp = ua_sp;
 
         ua_sp -= sizeof (struct sigframe32);
@@ -347,6 +394,15 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         */
        ua_fp -= sizeof(frame32.retaddr);
 
+       /*
+        * Generate the validation token for sigreturn
+        */
+       token_uctx = ua_uctxp;
+       kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
+       assert(kr == KERN_SUCCESS);
+       token = CAST_DOWN_EXPLICIT(user32_addr_t, token_uctx) ^
+               CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token);
+
        /*
         * Build the argument list for the signal handler.
         * Handler should call sigreturn to get out of it
@@ -357,6 +413,7 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
         frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
         frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
+        frame32.token = token;
 
         if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
             goto bad;
@@ -375,14 +432,14 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
             uctx32.uc_stack.ss_flags |= SS_ONSTACK;
         uctx32.uc_link = 0;
 
-        uctx32.uc_mcsize = sizeof(struct mcontext32);
+        uctx32.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
         uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);
 
         if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
             goto bad;
 
-        if (copyout((caddr_t)&mctx.mctx32, ua_mctxp, sizeof (struct mcontext32)))
+        if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size))
             goto bad;
 
         sinfo64.pad[0] = tstate32->esp;
@@ -495,7 +552,8 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
              * in the siginfo to the handler is supposed to only
              * contain the status, so we have to shift it out.
              */
-            sinfo64.si_status = WEXITSTATUS(status_and_exitcode);
+            sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
+            p->p_xhighbits = 0;
             break;
         }
     }
@@ -504,7 +562,7 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
 
         bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));
 
-        siginfo_user_to_user64(&sinfo64,&sinfo64_user64);
+        siginfo_user_to_user64_x86(&sinfo64,&sinfo64_user64);
 
 #if CONFIG_DTRACE
         bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
@@ -536,14 +594,14 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
 
         flavor = x86_THREAD_STATE64;
         state_count = x86_THREAD_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.ss;
+        state = (void *)&mctxp->mctx_avx64.ss;
     } else {
         x86_thread_state32_t    *tstate32;
         user32_siginfo_t sinfo32;
 
         bzero((caddr_t)&sinfo32, sizeof(sinfo32));
 
-        siginfo_user_to_user32(&sinfo64,&sinfo32);
+        siginfo_user_to_user32_x86(&sinfo64,&sinfo32);
 
 #if CONFIG_DTRACE
         bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
@@ -571,7 +629,7 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
             goto bad;
 
-        tstate32 = &mctx.mctx32.ss;
+        tstate32 = &mctxp->mctx_avx32.ss;
 
         tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
         tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
@@ -593,12 +651,15 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint
         goto bad;
 
     ml_fp_setvalid(FALSE);
+    /* Tell the PAL layer about the signal */
+    pal_set_signal_delivery( thread );
 
     proc_lock(p);
 
     return;
 
bad:
+
     proc_lock(p);
     SIGACTION(p, SIGILL) = SIG_DFL;
     sig = sigmask(SIGILL);
@@ -626,12 +687,18 @@ bad:
 int
 sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
 {
-    union {
-        struct mcontext32 mctx32;
-        struct mcontext64 mctx64;
-    } mctx;
+    union {
+        struct mcontext_avx32       mctx_avx32;
+        struct mcontext_avx64       mctx_avx64;
+#if !defined(RC_HIDE_XNU_J137)
+        struct mcontext_avx512_32   mctx_avx512_32;
+        struct mcontext_avx512_64   mctx_avx512_64;
+#endif
+    } mctx_store, *mctxp = &mctx_store;
+
     thread_t thread = current_thread();
     struct uthread * ut;
+    struct sigacts *ps = p->p_sigacts;
     int error;
     int onstack = 0;
@@ -641,6 +708,11 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
     mach_msg_type_number_t fs_count;
     unsigned int           fs_flavor;
     void                 * fs;
+    int rval = EJUSTRETURN;
+    xstate_t    sig_xstate;
+    uint32_t    sigreturn_validation;
+    user_addr_t token_uctx;
+    kern_return_t kr;
 
     ut = (struct uthread *)get_bsdthread_info(thread);
 
@@ -656,33 +728,55 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
         return (0);
     }
 
+    bzero(mctxp, sizeof(*mctxp));
+
+    sig_xstate = current_xstate();
+
+    sigreturn_validation = atomic_load_explicit(
+            &ps->ps_sigreturn_validation, memory_order_relaxed);
+    token_uctx = uap->uctx;
+    kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx);
+    assert(kr == KERN_SUCCESS);
+
     if (proc_is64bit(p)) {
         struct user_ucontext64  uctx64;
+        user64_addr_t token;
 
         if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
             return(error);
 
-        if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctx.mctx64, sizeof (struct mcontext64))))
+        if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size)))
             return(error);
 
         onstack = uctx64.uc_onstack & 01;
         ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;
 
-        ts_flavor = x86_THREAD_STATE64;
+        ts_flavor = x86_THREAD_STATE64;
         ts_count  = x86_THREAD_STATE64_COUNT;
-        ts = (void *)&mctx.mctx64.ss;
-
-        fs_flavor = x86_FLOAT_STATE64;
-        fs_count = x86_FLOAT_STATE64_COUNT;
-        fs = (void *)&mctx.mctx64.fs;
-
+        ts = (void *)&mctxp->mctx_avx64.ss;
+
+        fs_flavor = thread_state64[sig_xstate].flavor;
+        fs_count = thread_state64[sig_xstate].state_count;
+        fs = (void *)&mctxp->mctx_avx64.fs;
+
+        token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
+        if ((user64_addr_t)uap->token != token) {
+#if DEVELOPMENT || DEBUG
+            printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
+                    p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
+#endif /* DEVELOPMENT || DEBUG */
+            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
+                rval = EINVAL;
+            }
+        }
     } else {
         struct user_ucontext32  uctx32;
+        user32_addr_t token;
 
         if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
             return(error);
 
-        if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctx.mctx32, sizeof (struct mcontext32))))
+        if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size)))
             return(error);
 
         onstack = uctx32.uc_onstack & 01;
@@ -690,11 +784,23 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
 
         ts_flavor = x86_THREAD_STATE32;
         ts_count  = x86_THREAD_STATE32_COUNT;
-        ts = (void *)&mctx.mctx32.ss;
-
-        fs_flavor = x86_FLOAT_STATE32;
-        fs_count = x86_FLOAT_STATE32_COUNT;
-        fs = (void *)&mctx.mctx32.fs;
+        ts = (void *)&mctxp->mctx_avx32.ss;
+
+        fs_flavor = thread_state32[sig_xstate].flavor;
+        fs_count = thread_state32[sig_xstate].state_count;
+        fs = (void *)&mctxp->mctx_avx32.fs;
+
+        token = CAST_DOWN_EXPLICIT(user32_addr_t, uap->uctx) ^
+                CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token);
+        if ((user32_addr_t)uap->token != token) {
+#if DEVELOPMENT || DEBUG
+            printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
+                    p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
+#endif /* DEVELOPMENT || DEBUG */
+            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
+                rval = EINVAL;
+            }
+        }
     }
 
     if (onstack)
@@ -705,72 +811,73 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
     if (ut->uu_siglist & ~ut->uu_sigmask)
         signal_setast(thread);
 
+    if (rval == EINVAL) {
+        goto error_ret;
+    }
+
     /*
      * thread_set_state() does all the needed checks for the passed in
      * content
      */
-    if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS)
-        return(EINVAL);
-
+    if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
+        rval = EINVAL;
+#if DEVELOPMENT || DEBUG
+        printf("process %s[%d] sigreturn thread_setstatus error %d\n",
+                p->p_comm, p->p_pid, rval);
+#endif /* DEVELOPMENT || DEBUG */
+        goto error_ret;
+    }
+
     ml_fp_setvalid(TRUE);
 
-    if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS)
-        return(EINVAL);
+    if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
+        rval = EINVAL;
+#if DEVELOPMENT || DEBUG
+        printf("process %s[%d] sigreturn thread_setstatus error %d\n",
+                p->p_comm, p->p_pid, rval);
+#endif /* DEVELOPMENT || DEBUG */
+        goto error_ret;
 
-    return (EJUSTRETURN);
+    }
error_ret:
+    return rval;
 }
 
 /*
- * machine_exception() performs MD translation
- * of a mach exception to a unix signal and code.
+ * machine_exception() performs machine-dependent translation
+ * of a mach exception to a unix signal.
  */
-
-boolean_t
-machine_exception(
-    int                 exception,
-    mach_exception_code_t       code,
-    __unused mach_exception_subcode_t subcode,
-    int                 *unix_signal,
-    mach_exception_code_t       *unix_code)
+int
+machine_exception(int exception,
+        mach_exception_code_t code,
+        __unused mach_exception_subcode_t subcode)
 {
-
     switch(exception) {
-
-    case EXC_BAD_ACCESS:
-        /* Map GP fault to SIGSEGV, otherwise defer to caller */
-        if (code == EXC_I386_GPFLT) {
-            *unix_signal = SIGSEGV;
-            *unix_code = code;
+        case EXC_BAD_ACCESS:
+            /* Map GP fault to SIGSEGV, otherwise defer to caller */
+            if (code == EXC_I386_GPFLT) {
+                return SIGSEGV;
+            }
             break;
-        }
-        return(FALSE);
 
-    case EXC_BAD_INSTRUCTION:
-        *unix_signal = SIGILL;
-        *unix_code = code;
-        break;
+        case EXC_BAD_INSTRUCTION:
+            return SIGILL;
 
-    case EXC_ARITHMETIC:
-        *unix_signal = SIGFPE;
-        *unix_code = code;
-        break;
+        case EXC_ARITHMETIC:
+            return SIGFPE;
 
-    case EXC_SOFTWARE:
-        if (code == EXC_I386_BOUND) {
-            /*
-             * Map #BR, the Bound Range Exceeded exception, to
-             * SIGTRAP.
-             */
-            *unix_signal = SIGTRAP;
-            *unix_code = code;
+        case EXC_SOFTWARE:
+            if (code == EXC_I386_BOUND) {
+                /*
+                 * Map #BR, the Bound Range Exceeded exception, to
+                 * SIGTRAP.
+                 */
+                return SIGTRAP;
+            }
             break;
-        }
-
-    default:
-        return(FALSE);
     }
-
-    return(TRUE);
+
+    return 0;
 }
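
A note on the sigreturn token scheme this diff introduces, with an illustrative sketch that is not part of the xnu sources: sendsig() converts the user-space ucontext address, XORs it with the per-process secret ps->ps_sigreturn_token, and hands the result to the handler (in r9 for 64-bit processes, in the new sigframe32 token field for 32-bit ones); sigreturn() recomputes the same XOR over uap->uctx and returns EINVAL on a mismatch unless validation is disabled (PS_SIGRETURN_VALIDATION_DISABLED). The stand-alone C program below models only that check; the names process_secret, frame_token and validate_frame are invented for illustration and do not exist in the kernel.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t user64_addr_t;

/* Illustrative stand-in for the per-process ps->ps_sigreturn_token secret. */
static const user64_addr_t process_secret = 0x5a5af00ddeadbeefULL;

/* sendsig() side: derive the token that travels with the signal frame. */
static user64_addr_t
frame_token(user64_addr_t uctx_address)
{
    return uctx_address ^ process_secret;
}

/* sigreturn() side: accept the frame only if the presented token matches. */
static int
validate_frame(user64_addr_t uctx_address, user64_addr_t presented_token)
{
    return (frame_token(uctx_address) == presented_token) ? 0 : 22 /* EINVAL */;
}

int
main(void)
{
    user64_addr_t uctx  = 0x00007ffee0001000ULL;   /* pretend ucontext address */
    user64_addr_t token = frame_token(uctx);

    assert(validate_frame(uctx, token) == 0);      /* untouched frame is accepted */
    assert(validate_frame(uctx, token ^ 1) != 0);  /* tampered token is rejected */
    printf("sigreturn token check behaves as expected\n");
    return 0;
}

Under this scheme, forging a usable signal frame requires knowing both the exact ucontext address and the per-process secret, which is the property the kernel-side comparison in sigreturn() relies on.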