X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4452a7af2eac33dbad800bcc91f2399d62c18f53..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/bsd/dev/i386/unix_signal.c?ds=sidebyside

diff --git a/bsd/dev/i386/unix_signal.c b/bsd/dev/i386/unix_signal.c
index 69e3dc720..1c271607f 100644
--- a/bsd/dev/i386/unix_signal.c
+++ b/bsd/dev/i386/unix_signal.c
@@ -48,17 +48,23 @@
 #include 
 #include 	/* for thread_abort_safely */
 #include 
-#include 
 #include 
 #include 
+#include 
 #include 
+#include 
+
+#include 
 #include 
+#include 
+
 /* Forward: */
-extern boolean_t machine_exception(int, int, int, int *, int *);
-extern kern_return_t thread_getstatus(register thread_t act, int flavor,
+extern boolean_t machine_exception(int, mach_exception_code_t,
+		mach_exception_subcode_t, int *, mach_exception_subcode_t *);
+extern kern_return_t thread_getstatus(thread_t act, int flavor,
 			thread_state_t tstate, mach_msg_type_number_t *count);
 extern kern_return_t thread_setstatus(thread_t thread, int flavor,
 			thread_state_t tstate, mach_msg_type_number_t count);
@@ -67,6 +73,8 @@ extern kern_return_t thread_setstatus(thread_t thread, int flavor,
 /* These defns should match the Libc implmn */
 #define UC_TRAD			1
 #define UC_FLAVOR		30
+#define UC_SET_ALT_STACK	0x40000000
+#define UC_RESET_ALT_STACK	0x80000000
 
 #define C_32_STK_ALIGN		16
 #define C_64_STK_ALIGN		16
@@ -85,23 +93,83 @@ extern kern_return_t thread_setstatus(thread_t thread, int flavor,
  * to the user specified pc, psl.
  */
 struct sigframe32 {
-    int             retaddr;
-    sig_t           catcher;
-    int             sigstyle;
-    int             sig;
-    siginfo_t *     sinfo;
-    struct ucontext *   uctx;
+    int             retaddr;
+    user32_addr_t   catcher;    /* sig_t */
+    int             sigstyle;
+    int             sig;
+    user32_addr_t   sinfo;      /* siginfo32_t* */
+    user32_addr_t   uctx;       /* struct ucontext32 */
 };
 
+/*
+ * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
+ * for the cases of extended states (plain FP, or AVX):
+ */
+typedef struct {
+    int flavor; natural_t state_count; size_t mcontext_size;
+} xstate_info_t;
+static const xstate_info_t thread_state64[] = {
+    [FP]     = { x86_FLOAT_STATE64,  x86_FLOAT_STATE64_COUNT,  sizeof(struct mcontext64) },
+    [AVX]    = { x86_AVX_STATE64,    x86_AVX_STATE64_COUNT,    sizeof(struct mcontext_avx64) },
+#if !defined(RC_HIDE_XNU_J137)
+    [AVX512] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) }
+#endif
+};
+static const xstate_info_t thread_state32[] = {
+    [FP]     = { x86_FLOAT_STATE32,  x86_FLOAT_STATE32_COUNT,  sizeof(struct mcontext32) },
+    [AVX]    = { x86_AVX_STATE32,    x86_AVX_STATE32_COUNT,    sizeof(struct mcontext_avx32) },
+#if !defined(RC_HIDE_XNU_J137)
+    [AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) }
+#endif
+};
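The thread_state64/thread_state32 tables just added give sendsig() and sigreturn() a single place to look up the thread-state flavor, state count, and mcontext size that match the FPU save area in use (plain FP, AVX, or AVX-512 where built in). A minimal sketch of how such a table is consumed, assuming current_xstate() returns the FP/AVX/AVX512 index used above; the helper name below is illustrative and not part of the file:

    /* Illustrative sketch: selecting the extended-state flavor for this thread. */
    static kern_return_t
    fetch_fp_state64_example(thread_t th, void *fs_area)
    {
        xstate_t xs = current_xstate();
        mach_msg_type_number_t count = thread_state64[xs].state_count;

        /* flavor, count and mcontext size all come from the same table row */
        return thread_getstatus(th, thread_state64[xs].flavor,
            (thread_state_t)fs_area, &count);
    }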
+
+/*
+ * NOTE: Source and target may *NOT* overlap!
+ * XXX: Unify with bsd/kern/kern_exit.c
+ */
+static void
+siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out)
+{
+    out->si_signo   = in->si_signo;
+    out->si_errno   = in->si_errno;
+    out->si_code    = in->si_code;
+    out->si_pid     = in->si_pid;
+    out->si_uid     = in->si_uid;
+    out->si_status  = in->si_status;
+    out->si_addr    = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr);
+    /* following cast works for sival_int because of padding */
+    out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr);
+    out->si_band    = in->si_band;          /* range reduction */
+    out->__pad[0]   = in->pad[0];           /* mcontext.ss.r1 */
+}
+
+static void
+siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out)
+{
+    out->si_signo   = in->si_signo;
+    out->si_errno   = in->si_errno;
+    out->si_code    = in->si_code;
+    out->si_pid     = in->si_pid;
+    out->si_uid     = in->si_uid;
+    out->si_status  = in->si_status;
+    out->si_addr    = in->si_addr;
+    out->si_value.sival_ptr = in->si_value.sival_ptr;
+    out->si_band    = in->si_band;          /* range reduction */
+    out->__pad[0]   = in->pad[0];           /* mcontext.ss.r1 */
+}
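sendsig() below builds a single, 64-bit-wide user_siginfo_t and only narrows it at copyout time with the helpers above. A hedged sketch of what the 32-bit conversion implies for pointer-sized fields; the wrapper is illustrative only:

    /* Illustrative sketch: the 64->32 narrowing performed by the helper above. */
    static void
    narrow_siginfo_example(user_siginfo_t *sinfo64)
    {
        user32_siginfo_t sinfo32;

        siginfo_user_to_user32_x86(sinfo64, &sinfo32);
        /*
         * si_addr and si_value.sival_ptr pass through
         * CAST_DOWN_EXPLICIT(user32_addr_t, ...), so only the low 32 bits
         * survive.  That is harmless for a 32-bit process, whose addresses
         * fit, and is why the 64-bit path copies out a user64_siginfo_t via
         * siginfo_user_to_user64_x86() instead.
         */
        (void)sinfo32;
    }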
 
 void
-sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_long code)
+sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
 {
-    union {
-        struct mcontext32 mctx32;
-        struct mcontext64 mctx64;
-    } mctx;
+    union {
+        struct mcontext_avx32       mctx_avx32;
+        struct mcontext_avx64       mctx_avx64;
+#if !defined(RC_HIDE_XNU_J137)
+        struct mcontext_avx512_32   mctx_avx512_32;
+        struct mcontext_avx512_64   mctx_avx512_64;
+#endif
+    } mctx_store, *mctxp = &mctx_store;
+
     user_addr_t ua_sp;
     user_addr_t ua_fp;
     user_addr_t ua_cr2;
@@ -112,75 +180,74 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
     struct sigacts *ps = p->p_sigacts;
     int oonstack, flavor;
+    user_addr_t trampact;
+    int sigonstack;
     void * state;
     mach_msg_type_number_t state_count;
-    int uthsigaltstack = 0;
-    int altstack = 0;
-    thread_t thread = current_thread();
+    thread_t thread;
     struct uthread * ut;
     int stack_size = 0;
     int infostyle = UC_TRAD;
-
-    if (p->p_sigacts->ps_siginfo & sigmask(sig))
-        infostyle = UC_FLAVOR;
+    xstate_t sig_xstate;
 
+    thread = current_thread();
     ut = get_bsdthread_info(thread);
-    uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;
+    if (p->p_sigacts->ps_siginfo & sigmask(sig))
+        infostyle = UC_FLAVOR;
+
+    oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
+    trampact = ps->ps_trampact[sig];
+    sigonstack = (ps->ps_sigonstack & sigmask(sig));
 
-    if (uthsigaltstack != 0 ) {
-        oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
-        altstack = ut->uu_flag & UT_ALTSTACK;
-    } else {
-        oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;
-        altstack = ps->ps_flags & SAS_ALTSTACK;
-    }
     /*
      * init siginfo
     */
-    bzero((caddr_t)&sinfo64, sizeof(user_siginfo_t));
+    proc_unlock(p);
+
+    bzero((caddr_t)&sinfo64, sizeof(sinfo64));
 
     sinfo64.si_signo = sig;
-
+
+    bzero(mctxp, sizeof(*mctxp));
+
+    sig_xstate = current_xstate();
+
     if (proc_is64bit(p)) {
         x86_thread_state64_t    *tstate64;
         struct user_ucontext64  uctx64;
 
         flavor = x86_THREAD_STATE64;
         state_count = x86_THREAD_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.ss;
+        state = (void *)&mctxp->mctx_avx64.ss;
 
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        flavor = x86_FLOAT_STATE64;
-        state_count = x86_FLOAT_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.fs;
+        flavor = thread_state64[sig_xstate].flavor;
+        state_count = thread_state64[sig_xstate].state_count;
+        state = (void *)&mctxp->mctx_avx64.fs;
 
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
         flavor = x86_EXCEPTION_STATE64;
         state_count = x86_EXCEPTION_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.es;
+        state = (void *)&mctxp->mctx_avx64.es;
 
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        tstate64 = &mctx.mctx64.ss;
+        tstate64 = &mctxp->mctx_avx64.ss;
 
-        if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
-            if (uthsigaltstack != 0) {
-                ua_sp = ut->uu_sigstk.ss_sp;
-                stack_size = ut->uu_sigstk.ss_size;
-                ua_sp += stack_size;
-                ut->uu_sigstk.ss_flags |= SA_ONSTACK;
-            } else {
-                ua_sp = ps->ps_sigstk.ss_sp;
-                stack_size = ps->ps_sigstk.ss_size;
-                ua_sp += stack_size;
-                ps->ps_sigstk.ss_flags |= SA_ONSTACK;
-            }
-        } else
+        /* figure out where our new stack lives */
+        if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
+            (sigonstack)) {
+            ua_sp = ut->uu_sigstk.ss_sp;
+            stack_size = ut->uu_sigstk.ss_size;
+            ua_sp += stack_size;
+            ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+        } else {
             ua_sp = tstate64->rsp;
-        ua_cr2 = mctx.mctx64.es.faultvaddr;
+        }
+        ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;
 
         /* The x86_64 ABI defines a 128-byte red zone. */
         ua_sp -= C_64_REDZONE_LEN;
@@ -188,10 +255,10 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
         ua_sp -= sizeof (struct user_ucontext64);
         ua_uctxp = ua_sp;        // someone tramples the first word!
 
-        ua_sp -= sizeof (user_siginfo_t);
+        ua_sp -= sizeof (user64_siginfo_t);
         ua_sip = ua_sp;
 
-        ua_sp -= sizeof (struct mcontext64);
+        ua_sp -= thread_state64[sig_xstate].mcontext_size;
         ua_mctxp = ua_sp;
 
         /*
@@ -220,19 +287,19 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
             uctx64.uc_stack.ss_flags |= SS_ONSTACK;
         uctx64.uc_link = 0;
 
-        uctx64.uc_mcsize = sizeof(struct mcontext64);
+        uctx64.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
         uctx64.uc_mcontext64 = ua_mctxp;
 
         if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
             goto bad;
 
-        if (copyout((caddr_t)&mctx.mctx64, ua_mctxp, sizeof (struct mcontext64)))
+        if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size))
             goto bad;
 
         sinfo64.pad[0]  = tstate64->rsp;
         sinfo64.si_addr = tstate64->rip;
 
-        tstate64->rip = ps->ps_trampact[sig];
+        tstate64->rip = trampact;
         tstate64->rsp = ua_fp;
         tstate64->rflags = get_eflags_exportmask();
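The 64-bit path above carves the whole signal frame out of the chosen stack top: skip the 128-byte red zone, then reserve a user_ucontext64, a user64_siginfo_t, and an mcontext whose size depends on the xstate in use, before rip is pointed at the trampoline. A compressed restatement of that layout math; the alignment step, which the file performs with its own macro, is only hinted at here:

    /* Sketch only: the frame laid out below the (possibly alternate) stack top. */
    static void
    frame64_layout_example(user_addr_t ua_sp, xstate_t xs)
    {
        user_addr_t ua_uctxp, ua_sip, ua_mctxp;

        ua_sp -= C_64_REDZONE_LEN;                    /* x86_64 ABI red zone */
        ua_sp -= sizeof(struct user_ucontext64);
        ua_uctxp = ua_sp;
        ua_sp -= sizeof(user64_siginfo_t);
        ua_sip = ua_sp;
        ua_sp -= thread_state64[xs].mcontext_size;    /* FP/AVX/AVX512 sized */
        ua_mctxp = ua_sp;
        /* the real code then aligns the frame to C_64_STK_ALIGN (16 bytes)
         * before pointing rip at the trampoline (trampact) */
        (void)ua_uctxp; (void)ua_sip; (void)ua_mctxp;
    }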
         /*
@@ -254,52 +321,48 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
     } else {
         x86_thread_state32_t    *tstate32;
-        struct ucontext         uctx32;
+        struct user_ucontext32  uctx32;
         struct sigframe32       frame32;
 
         flavor = x86_THREAD_STATE32;
         state_count = x86_THREAD_STATE32_COUNT;
-        state = (void *)&mctx.mctx32.ss;
+        state = (void *)&mctxp->mctx_avx32.ss;
 
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        flavor = x86_FLOAT_STATE32;
-        state_count = x86_FLOAT_STATE32_COUNT;
-        state = (void *)&mctx.mctx32.fs;
+        flavor = thread_state32[sig_xstate].flavor;
+        state_count = thread_state32[sig_xstate].state_count;
+        state = (void *)&mctxp->mctx_avx32.fs;
 
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
         flavor = x86_EXCEPTION_STATE32;
         state_count = x86_EXCEPTION_STATE32_COUNT;
-        state = (void *)&mctx.mctx32.es;
+        state = (void *)&mctxp->mctx_avx32.es;
 
         if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
             goto bad;
 
-        tstate32 = &mctx.mctx32.ss;
+        tstate32 = &mctxp->mctx_avx32.ss;
 
-        if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
-            if (uthsigaltstack != 0) {
-                ua_sp = ut->uu_sigstk.ss_sp;
-                stack_size = ut->uu_sigstk.ss_size;
-                ua_sp += stack_size;
-                ut->uu_sigstk.ss_flags |= SA_ONSTACK;
-            } else {
-                ua_sp = ps->ps_sigstk.ss_sp;
-                stack_size = ps->ps_sigstk.ss_size;
-                ua_sp += stack_size;
-                ps->ps_sigstk.ss_flags |= SA_ONSTACK;
-            }
-        } else
+        /* figure out where our new stack lives */
+        if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
+            (sigonstack)) {
+            ua_sp = ut->uu_sigstk.ss_sp;
+            stack_size = ut->uu_sigstk.ss_size;
+            ua_sp += stack_size;
+            ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+        } else {
             ua_sp = tstate32->esp;
-        ua_cr2 = mctx.mctx32.es.faultvaddr;
+        }
+        ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;
 
-        ua_sp -= sizeof (struct ucontext);
+        ua_sp -= sizeof (struct user_ucontext32);
         ua_uctxp = ua_sp;        // someone tramples the first word!
 
-        ua_sp -= sizeof (siginfo_t);
+        ua_sp -= sizeof (user32_siginfo_t);
         ua_sip = ua_sp;
 
-        ua_sp -= sizeof (struct mcontext32);
+        ua_sp -= thread_state32[sig_xstate].mcontext_size;
         ua_mctxp = ua_sp;
 
         ua_sp -= sizeof (struct sigframe32);
@@ -324,9 +387,9 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
         frame32.retaddr = -1;
         frame32.sigstyle = infostyle;
         frame32.sig = sig;
-        frame32.catcher = CAST_DOWN(sig_t, ua_catcher);
-        frame32.sinfo = CAST_DOWN(siginfo_t *, ua_sip);
-        frame32.uctx = CAST_DOWN(struct ucontext *, ua_uctxp);
+        frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
+        frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
+        frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
 
         if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
             goto bad;
@@ -338,21 +401,21 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
         uctx32.uc_onstack = oonstack;
         uctx32.uc_sigmask = mask;
-        uctx32.uc_stack.ss_sp = CAST_DOWN(char *, ua_fp);
+        uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
         uctx32.uc_stack.ss_size = stack_size;
 
         if (oonstack)
             uctx32.uc_stack.ss_flags |= SS_ONSTACK;
         uctx32.uc_link = 0;
 
-        uctx32.uc_mcsize = sizeof(struct mcontext32);
+        uctx32.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
 
-        uctx32.uc_mcontext = CAST_DOWN(struct mcontext *, ua_mctxp);
+        uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);
 
         if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
             goto bad;
 
-        if (copyout((caddr_t)&mctx.mctx32, ua_mctxp, sizeof (struct mcontext32)))
+        if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size))
             goto bad;
 
         sinfo64.pad[0]  = tstate32->esp;
@@ -360,36 +423,12 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
     }
 
     switch (sig) {
-        case SIGCHLD:
-            sinfo64.si_pid = p->si_pid;
-            p->si_pid =0;
-            sinfo64.si_status = p->si_status;
-            p->si_status = 0;
-            sinfo64.si_uid = p->si_uid;
-            p->si_uid =0;
-            sinfo64.si_code = p->si_code;
-            p->si_code = 0;
-            if (sinfo64.si_code == CLD_EXITED) {
-                if (WIFEXITED(sinfo64.si_status))
-                    sinfo64.si_code = CLD_EXITED;
-                else if (WIFSIGNALED(sinfo64.si_status)) {
-                    if (WCOREDUMP(sinfo64.si_status))
-                        sinfo64.si_code = CLD_DUMPED;
-                    else
-                        sinfo64.si_code = CLD_KILLED;
-                }
-            }
-            break;
         case SIGILL:
             switch (ut->uu_code) {
                 case EXC_I386_INVOP:
                     sinfo64.si_code = ILL_ILLOPC;
                     break;
-                case EXC_I386_GPFLT:
-                    sinfo64.si_code = ILL_PRVOPC;
-                    break;
                 default:
-                    printf("unknown SIGILL code %d\n", ut->uu_code);
                     sinfo64.si_code = ILL_NOOP;
             }
             break;
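For a 32-bit process the handler arguments travel on the stack, not in registers: the struct sigframe32 copied out above (retaddr, catcher, sigstyle, sig, sinfo, uctx) is exactly what the user-level trampoline finds at the new esp. A userland-side illustration of the contract it ultimately serves, assuming the standard sigaction(2) API; this is not kernel code:

    #include <signal.h>
    #include <string.h>

    /* With SA_SIGINFO the trampoline calls a three-argument handler using the
     * sinfo/uctx words sendsig() placed in sigframe32 (sigstyle == UC_FLAVOR). */
    static void
    segv_handler(int sig, siginfo_t *si, void *uctx)
    {
        (void)sig; (void)si; (void)uctx;
    }

    static void
    install_handler(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sigemptyset(&sa.sa_mask);
        sa.sa_sigaction = segv_handler;
        sa.sa_flags = SA_SIGINFO;       /* selects the UC_FLAVOR/siginfo path */
        sigaction(SIGSEGV, &sa, NULL);
    }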
@@ -400,7 +439,13 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
 #define FP_OE 3    /* overflow */
 #define FP_UE 4    /* underflow */
 #define FP_PE 5    /* precision */
-            if (ut->uu_subcode & (1 << FP_ZE)) {
+            if (ut->uu_code == EXC_I386_DIV) {
+                sinfo64.si_code = FPE_INTDIV;
+            }
+            else if (ut->uu_code == EXC_I386_INTO) {
+                sinfo64.si_code = FPE_INTOVF;
+            }
+            else if (ut->uu_subcode & (1 << FP_ZE)) {
                 sinfo64.si_code = FPE_FLTDIV;
             } else if (ut->uu_subcode & (1 << FP_OE)) {
                 sinfo64.si_code = FPE_FLTOVF;
@@ -411,8 +456,6 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
             } else if (ut->uu_subcode & (1 << FP_IE)) {
                 sinfo64.si_code = FPE_FLTINV;
             } else {
-                printf("unknown SIGFPE code %d, subcode %x\n",
-                    ut->uu_code, ut->uu_subcode);
                 sinfo64.si_code = FPE_NOOP;
             }
             break;
@@ -427,6 +470,12 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
             sinfo64.si_addr = ua_cr2;
 
             switch (ut->uu_code) {
+                case EXC_I386_GPFLT:
+                    /* CR2 is meaningless after GP fault */
+                    /* XXX namespace clash! */
+                    sinfo64.si_addr = 0ULL;
+                    sinfo64.si_code = 0;
+                    break;
                 case KERN_PROTECTION_FAILURE:
                     sinfo64.si_code = SEGV_ACCERR;
                     break;
@@ -434,41 +483,133 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
                     sinfo64.si_code = SEGV_MAPERR;
                     break;
                 default:
-                    printf("unknown SIGSEGV code %d\n", ut->uu_code);
                     sinfo64.si_code = FPE_NOOP;
             }
             break;
         default:
+        {
+            int status_and_exitcode;
+
+            /*
+             * All other signals need to fill out a minimum set of
+             * information for the siginfo structure passed into
+             * the signal handler, if SA_SIGINFO was specified.
+             *
+             * p->si_status actually contains both the status and
+             * the exit code; we save it off in its own variable
+             * for later breakdown.
+             */
+            proc_lock(p);
+            sinfo64.si_pid = p->si_pid;
+            p->si_pid =0;
+            status_and_exitcode = p->si_status;
+            p->si_status = 0;
+            sinfo64.si_uid = p->si_uid;
+            p->si_uid =0;
+            sinfo64.si_code = p->si_code;
+            p->si_code = 0;
+            proc_unlock(p);
+            if (sinfo64.si_code == CLD_EXITED) {
+                if (WIFEXITED(status_and_exitcode))
+                    sinfo64.si_code = CLD_EXITED;
+                else if (WIFSIGNALED(status_and_exitcode)) {
+                    if (WCOREDUMP(status_and_exitcode)) {
+                        sinfo64.si_code = CLD_DUMPED;
+                        status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
+                    } else {
+                        sinfo64.si_code = CLD_KILLED;
+                        status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
+                    }
+                }
+            }
+            /*
+             * The recorded status contains the exit code and the
+             * signal information, but the information to be passed
+             * in the siginfo to the handler is supposed to only
+             * contain the status, so we have to shift it out.
+             */
+            sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
+            p->p_xhighbits = 0;
             break;
+        }
     }
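The comment above is the key to this default case: p->si_status arrives as a full wait(2) status word, so the code first re-derives si_code (CLD_EXITED, CLD_DUMPED, CLD_KILLED) and then strips the packing before handing si_status to the handler, folding in the extra exit-status byte kept in p_xhighbits. A small worked example of the packing being undone, using the userland macros; purely illustrative:

    #include <sys/wait.h>
    #include <signal.h>

    static void
    wait_status_example(void)
    {
        int exited = W_EXITCODE(3, 0);                  /* child exited with 3 -> 0x0300 */
        int dumped = W_EXITCODE(0, SIGSEGV) | WCOREFLAG;

        /* WIFEXITED(exited) is true and WEXITSTATUS(exited) == 3: that 3 is
         * what lands in si_status after the masking shown above.            */
        /* WIFSIGNALED(dumped) and WCOREDUMP(dumped) are true -> CLD_DUMPED.  */
        (void)exited; (void)dumped;
    }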
 
     if (proc_is64bit(p)) {
-        if (copyout((caddr_t)&sinfo64, ua_sip, sizeof (sinfo64)))
-            goto bad;
+        user64_siginfo_t sinfo64_user64;
+
+        bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));
+
+        siginfo_user_to_user64_x86(&sinfo64,&sinfo64_user64);
+
+#if CONFIG_DTRACE
+        bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
+
+        ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
+        ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
+        ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
+        ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
+        ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
+        /* XXX truncates faulting address to void * on K32 */
+        ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
+
+        /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
+        switch (sig) {
+        case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
+            DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
+            break;
+        default:
+            break;
+        }
+
+        /* XXX truncates catcher address to uintptr_t */
+        DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
+            void (*)(void), CAST_DOWN(sig_t, ua_catcher));
+#endif /* CONFIG_DTRACE */
+
+        if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64)))
+            goto bad;
 
         flavor = x86_THREAD_STATE64;
         state_count = x86_THREAD_STATE64_COUNT;
-        state = (void *)&mctx.mctx64.ss;
+        state = (void *)&mctxp->mctx_avx64.ss;
     } else {
-        x86_thread_state32_t    *tstate32;
-        siginfo_t sinfo32;
+        x86_thread_state32_t    *tstate32;
+        user32_siginfo_t sinfo32;
 
-        bzero((caddr_t)&sinfo32, sizeof(siginfo_t));
+        bzero((caddr_t)&sinfo32, sizeof(sinfo32));
 
-        sinfo32.si_signo = sinfo64.si_signo;
-        sinfo32.si_code = sinfo64.si_code;
-        sinfo32.si_pid = sinfo64.si_pid;
-        sinfo32.si_uid = sinfo64.si_uid;
-        sinfo32.si_status = sinfo64.si_status;
-        sinfo32.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
-        sinfo32.pad[0] = sinfo64.pad[0];
+        siginfo_user_to_user32_x86(&sinfo64,&sinfo32);
 
-        if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
-            goto bad;
+#if CONFIG_DTRACE
+        bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
+
+        ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
+        ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
+        ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
+        ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
+        ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
+        ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);
+
+        /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
+        switch (sig) {
+        case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
+            DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
+            break;
+        default:
+            break;
+        }
+
+        DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
+            void (*)(void), CAST_DOWN(sig_t, ua_catcher));
+#endif /* CONFIG_DTRACE */
+
+        if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
+            goto bad;
 
-        tstate32 = &mctx.mctx32.ss;
-        tstate32->eip = CAST_DOWN(unsigned int, ps->ps_trampact[sig]);
-        tstate32->esp = CAST_DOWN(unsigned int, ua_fp);
-
+        tstate32 = &mctxp->mctx_avx32.ss;
+
+        tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
+        tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
+
         tstate32->eflags = get_eflags_exportmask();
 
         tstate32->cs = USER_CS;
@@ -486,16 +627,25 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_lo
         goto bad;
 
     ml_fp_setvalid(FALSE);
 
+    /* Tell the PAL layer about the signal */
+    pal_set_signal_delivery( thread );
+
+    proc_lock(p);
+
     return;
 
 bad:
+
+    proc_lock(p);
     SIGACTION(p, SIGILL) = SIG_DFL;
     sig = sigmask(SIGILL);
     p->p_sigignore &= ~sig;
     p->p_sigcatch &= ~sig;
     ut->uu_sigmask &= ~sig;
     /* sendsig is called with signal lock held */
-    psignal_lock(p, SIGILL, 0);
+    proc_unlock(p);
+    psignal_locked(p, SIGILL);
+    proc_lock(p);
     return;
 }
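Both branches of sendsig() above prefer the per-thread alternate stack when UT_ALTSTACK is set, the thread is not already on it, and the signal was registered to run there (the sigonstack check). A userland-side sketch of how that state gets established with the standard sigaltstack(2)/sigaction(2) calls; illustrative only, and segv_handler is the example handler from the earlier sketch:

    #include <signal.h>
    #include <stdlib.h>
    #include <string.h>

    static void
    use_alternate_stack(void)
    {
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        sigaltstack(&ss, NULL);                 /* feeds the per-thread uu_sigstk above */

        memset(&sa, 0, sizeof(sa));
        sigemptyset(&sa.sa_mask);
        sa.sa_sigaction = segv_handler;         /* handler from the earlier sketch */
        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;  /* makes the sigonstack test true */
        sigaction(SIGSEGV, &sa, NULL);
    }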
@@ -511,19 +661,20 @@ bad:
  */
 
 int
-sigreturn(
-    struct proc *p,
-    struct sigreturn_args *uap,
-    __unused int *retval)
+sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
 {
-    union {
-        struct mcontext32 mctx32;
-        struct mcontext64 mctx64;
-    } mctx;
+    union {
+        struct mcontext_avx32       mctx_avx32;
+        struct mcontext_avx64       mctx_avx64;
+#if !defined(RC_HIDE_XNU_J137)
+        struct mcontext_avx512_32   mctx_avx512_32;
+        struct mcontext_avx512_64   mctx_avx512_64;
+#endif
+    } mctx_store, *mctxp = &mctx_store;
+
     thread_t thread = current_thread();
     struct uthread * ut;
     int error;
-    int uthsigaltstack = 0;
     int onstack = 0;
 
     mach_msg_type_number_t ts_count;
@@ -532,9 +683,26 @@ sigreturn(
     mach_msg_type_number_t fs_count;
     unsigned int           fs_flavor;
     void                *  fs;
+    int rval = EJUSTRETURN;
+    xstate_t sig_xstate;
 
     ut = (struct uthread *)get_bsdthread_info(thread);
-    uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;
+
+    /*
+     * If we are being asked to change the altstack flag on the thread, we
+     * just set/reset it and return (the uap->uctx is not used).
+     */
+    if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
+        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+        return (0);
+    } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
+        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
+        return (0);
+    }
+
+    bzero(mctxp, sizeof(*mctxp));
+
+    sig_xstate = current_xstate();
 
     if (proc_is64bit(p)) {
         struct user_ucontext64  uctx64;
@@ -542,27 +710,27 @@ sigreturn(
         if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
             return(error);
 
-        if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctx.mctx64, sizeof (struct mcontext64))))
+        if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size)))
             return(error);
 
         onstack = uctx64.uc_onstack & 01;
         ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;
 
-        ts_flavor = x86_THREAD_STATE64;
+        ts_flavor = x86_THREAD_STATE64;
         ts_count  = x86_THREAD_STATE64_COUNT;
-        ts = (void *)&mctx.mctx64.ss;
+        ts = (void *)&mctxp->mctx_avx64.ss;
 
-        fs_flavor = x86_FLOAT_STATE64;
-        fs_count = x86_FLOAT_STATE64_COUNT;
-        fs = (void *)&mctx.mctx64.fs;
+        fs_flavor = thread_state64[sig_xstate].flavor;
+        fs_count = thread_state64[sig_xstate].state_count;
+        fs = (void *)&mctxp->mctx_avx64.fs;
 
     } else {
-        struct ucontext uctx32;
+        struct user_ucontext32 uctx32;
 
         if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
             return(error);
 
-        if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctx.mctx32, sizeof (struct mcontext32))))
+        if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size)))
             return(error);
 
         onstack = uctx32.uc_onstack & 01;
@@ -570,38 +738,38 @@ sigreturn(
 
         ts_flavor = x86_THREAD_STATE32;
         ts_count  = x86_THREAD_STATE32_COUNT;
-        ts = (void *)&mctx.mctx32.ss;
+        ts = (void *)&mctxp->mctx_avx32.ss;
 
-        fs_flavor = x86_FLOAT_STATE32;
-        fs_count = x86_FLOAT_STATE32_COUNT;
-        fs = (void *)&mctx.mctx32.fs;
-    }
-    if (onstack) {
-        if (uthsigaltstack != 0)
-            ut->uu_sigstk.ss_flags |= SA_ONSTACK;
-        else
-            p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
-    } else {
-        if (uthsigaltstack != 0)
-            ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
-        else
-            p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
+        fs_flavor = thread_state32[sig_xstate].flavor;
+        fs_count = thread_state32[sig_xstate].state_count;
+        fs = (void *)&mctxp->mctx_avx32.fs;
     }
+
+    if (onstack)
+        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+    else
+        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
+
     if (ut->uu_siglist & ~ut->uu_sigmask)
         signal_setast(thread);
-
     /*
-     * thread_set_state() does all the needed checks for the passed in content
+     * thread_set_state() does all the needed checks for the passed in
+     * content
      */
-    if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS)
-        return(EINVAL);
-
+    if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
+        rval = EINVAL;
+        goto error_ret;
+    }
+
     ml_fp_setvalid(TRUE);
 
-    if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS)
-        return(EINVAL);
+    if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
+        rval = EINVAL;
+        goto error_ret;
 
-    return (EJUSTRETURN);
+    }
+error_ret:
+    return rval;
 }
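Besides restoring a saved context, sigreturn() above now doubles as the switch for the thread's SA_ONSTACK bookkeeping: when uap->infostyle is UC_SET_ALT_STACK or UC_RESET_ALT_STACK the ucontext argument is ignored and only the flag changes. A condensed restatement of that early-out, using the same names the diff introduces; the wrapper itself is illustrative:

    /* Sketch: the pseudo-infostyle values handled before any copyin occurs. */
    static int
    altstack_toggle_example(struct uthread *ut, unsigned int infostyle)
    {
        if (infostyle == UC_SET_ALT_STACK) {
            ut->uu_sigstk.ss_flags |= SA_ONSTACK;   /* mark alt stack in use */
            return 0;
        }
        if (infostyle == UC_RESET_ALT_STACK) {
            ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;  /* mark alt stack free   */
            return 0;
        }
        return -1;      /* not a toggle: fall through to the full restore path */
    }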
@@ -612,78 +780,49 @@ sigreturn(
 boolean_t
 machine_exception(
-    int             exception,
-    int             code,
-    __unused int    subcode,
-    int             *unix_signal,
-    int             *unix_code
-)
-{
-
-    switch(exception) {
-
-    case EXC_BAD_INSTRUCTION:
-        *unix_signal = SIGILL;
-        *unix_code = code;
-        break;
-
-    case EXC_ARITHMETIC:
-        *unix_signal = SIGFPE;
-        *unix_code = code;
-        break;
-
-    default:
-        return(FALSE);
-    }
-
-    return(TRUE);
-}
-
-#include 
-#include 
-
-int __pthread_cset(struct sysent *);
-void __pthread_creset(struct sysent *);
-
-int
-__pthread_cset(struct sysent *callp)
+    int                                 exception,
+    mach_exception_code_t               code,
+    __unused mach_exception_subcode_t   subcode,
+    int                                 *unix_signal,
+    mach_exception_code_t               *unix_code)
 {
-    unsigned int cancel_enable;
-    thread_t thread;
-    struct uthread * uthread;
 
-    thread = current_thread();
-    uthread = get_bsdthread_info(thread);
+    switch(exception) {
 
-    cancel_enable = callp->sy_cancel;
-    if (cancel_enable == _SYSCALL_CANCEL_NONE) {
-        uthread->uu_flag |= UT_NOTCANCELPT;
-    } else {
-        if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
-            if (cancel_enable == _SYSCALL_CANCEL_PRE)
-                return(EINTR);
-            else
-                thread_abort_safely(thread);
+    case EXC_BAD_ACCESS:
+        /* Map GP fault to SIGSEGV, otherwise defer to caller */
+        if (code == EXC_I386_GPFLT) {
+            *unix_signal = SIGSEGV;
+            *unix_code = code;
+            break;
+        }
+        return(FALSE);
+
+    case EXC_BAD_INSTRUCTION:
+        *unix_signal = SIGILL;
+        *unix_code = code;
+        break;
+
+    case EXC_ARITHMETIC:
+        *unix_signal = SIGFPE;
+        *unix_code = code;
+        break;
+
+    case EXC_SOFTWARE:
+        if (code == EXC_I386_BOUND) {
+            /*
+             * Map #BR, the Bound Range Exceeded exception, to
+             * SIGTRAP.
+             */
+            *unix_signal = SIGTRAP;
+            *unix_code = code;
+            break;
         }
-    }
-    return(0);
-}
-
-
-void
-__pthread_creset(struct sysent *callp)
-{
-
-    unsigned int cancel_enable;
-    thread_t thread;
-    struct uthread * uthread;
-
-    thread = current_thread();
-    uthread = get_bsdthread_info(thread);
-
-    cancel_enable = callp->sy_cancel;
-    if (!cancel_enable)
-        uthread->uu_flag &= ~UT_NOTCANCELPT;
+    default:
+        return(FALSE);
+    }
+
+    return(TRUE);
 }
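The reworked machine_exception() is the Mach-to-BSD translation step: it claims only the exceptions it can name and leaves everything else (return FALSE) to the generic exception-to-signal code. A condensed view of the mapping it now implements, with the unix_code propagation elided for brevity; the helper is illustrative, not part of the file:

    /* Condensed restatement of the mapping above; 0 means "defer to caller". */
    static int
    x86_exception_to_signal_example(int exception, mach_exception_code_t code)
    {
        switch (exception) {
        case EXC_BAD_ACCESS:
            return (code == EXC_I386_GPFLT) ? SIGSEGV : 0;  /* newly added case */
        case EXC_BAD_INSTRUCTION:
            return SIGILL;
        case EXC_ARITHMETIC:
            return SIGFPE;
        case EXC_SOFTWARE:
            return (code == EXC_I386_BOUND) ? SIGTRAP : 0;  /* #BR -> SIGTRAP */
        default:
            return 0;
        }
    }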