+ /*
+  * NOTE(review): interior of a signal-delivery (sendsig-style) routine.
+  * It snapshots the interrupted thread's register state, lays out a signal
+  * frame (ucontext + siginfo + mcontext) on the user stack, and repoints the
+  * thread at the signal trampoline so the handler runs on return to user
+  * space.  The proc lock is held on entry to this span — the unlock below
+  * implies the caller acquired it earlier, outside this chunk.
+  */
+ proc_unlock(p);
+
+ bzero((caddr_t)&sinfo64, sizeof(sinfo64));
+ sinfo64.si_signo = sig;
+
+
+ /* 64-bit process: capture x86_64 state and build a 64-bit signal frame. */
+ if (proc_is64bit(p)) {
+ x86_thread_state64_t *tstate64;
+ struct user_ucontext64 uctx64;
+
+ /*
+  * Snapshot the interrupted thread's general-purpose, floating-point,
+  * and exception state into mctx.mctx64.  Any failure bails out to
+  * the 'bad' recovery label (defined outside this chunk).
+  */
+ flavor = x86_THREAD_STATE64;
+ state_count = x86_THREAD_STATE64_COUNT;
+ state = (void *)&mctx.mctx64.ss;
+ if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+ goto bad;
+
+ flavor = x86_FLOAT_STATE64;
+ state_count = x86_FLOAT_STATE64_COUNT;
+ state = (void *)&mctx.mctx64.fs;
+ if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+ goto bad;
+
+ flavor = x86_EXCEPTION_STATE64;
+ state_count = x86_EXCEPTION_STATE64_COUNT;
+ state = (void *)&mctx.mctx64.es;
+ if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+ goto bad;
+
+ tstate64 = &mctx.mctx64.ss;
+
+ /* figure out where our new stack lives */
+ if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
+ (sigonstack)) {
+ /* Alternate signal stack requested and not already on it: start at its top. */
+ ua_sp = ut->uu_sigstk.ss_sp;
+ stack_size = ut->uu_sigstk.ss_size;
+ ua_sp += stack_size;
+ ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+ } else {
+ ua_sp = tstate64->rsp;
+ }
+ ua_cr2 = mctx.mctx64.es.faultvaddr;
+
+ /* The x86_64 ABI defines a 128-byte red zone. */
+ ua_sp -= C_64_REDZONE_LEN;
+
+ /*
+  * Carve the frame out of the user stack, growing downward:
+  * ucontext first, then siginfo, then mcontext.  The ua_* cursors
+  * are user-space addresses (declared outside this chunk).
+  */
+ ua_sp -= sizeof (struct user_ucontext64);
+ ua_uctxp = ua_sp; // someone tramples the first word!
+
+ ua_sp -= sizeof (user64_siginfo_t);
+ ua_sip = ua_sp;
+
+ ua_sp -= sizeof (struct mcontext64);
+ ua_mctxp = ua_sp;
+
+ /*
+  * Align the frame and stack pointers to 16 bytes for SSE.
+  * (Note that we use 'ua_fp' as the base of the stack going forward)
+  */
+ ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);
+
+ /*
+  * But we need to account for the return address so the alignment is
+  * truly "correct" at _sigtramp
+  */
+ ua_fp -= sizeof(user_addr_t);
+
+ /*
+  * Build the signal context to be used by sigreturn.
+  */
+ bzero(&uctx64, sizeof(uctx64));
+
+ uctx64.uc_onstack = oonstack;
+ uctx64.uc_sigmask = mask;
+ uctx64.uc_stack.ss_sp = ua_fp;
+ uctx64.uc_stack.ss_size = stack_size;
+
+ if (oonstack)
+ uctx64.uc_stack.ss_flags |= SS_ONSTACK;
+ uctx64.uc_link = 0;
+
+ uctx64.uc_mcsize = sizeof(struct mcontext64);
+ uctx64.uc_mcontext64 = ua_mctxp;
+
+ /* Copy the ucontext and machine context out to the user stack. */
+ if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
+ goto bad;
+
+ if (copyout((caddr_t)&mctx.mctx64, ua_mctxp, sizeof (struct mcontext64)))
+ goto bad;
+
+ /* Record the pre-signal sp/pc before rewriting them below. */
+ sinfo64.pad[0] = tstate64->rsp;
+ sinfo64.si_addr = tstate64->rip;
+
+ /* Redirect the thread into the signal trampoline on the new stack. */
+ tstate64->rip = trampact;
+ tstate64->rsp = ua_fp;
+ tstate64->rflags = get_eflags_exportmask();
+ /*
+  * JOE - might not need to set these
+  */
+ tstate64->cs = USER64_CS;
+ tstate64->fs = NULL_SEG;
+ tstate64->gs = USER_CTHREAD;
+
+ /*
+  * Build the argument list for the signal handler.
+  * Handler should call sigreturn to get out of it
+  */
+ /* x86_64 calling convention: args passed in rdi/rsi/rdx/rcx/r8. */
+ tstate64->rdi = ua_catcher;
+ tstate64->rsi = infostyle;
+ tstate64->rdx = sig;
+ tstate64->rcx = ua_sip;
+ tstate64->r8 = ua_uctxp;
+
+ } else {
+ /* 32-bit process: same sequence with the i386 state/frame layouts. */
+ x86_thread_state32_t *tstate32;
+ struct user_ucontext32 uctx32;
+ struct sigframe32 frame32;
+
+ flavor = x86_THREAD_STATE32;
+ state_count = x86_THREAD_STATE32_COUNT;
+ state = (void *)&mctx.mctx32.ss;
+ if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+ goto bad;
+
+ flavor = x86_FLOAT_STATE32;
+ state_count = x86_FLOAT_STATE32_COUNT;
+ state = (void *)&mctx.mctx32.fs;
+ if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+ goto bad;
+
+ flavor = x86_EXCEPTION_STATE32;
+ state_count = x86_EXCEPTION_STATE32_COUNT;
+ state = (void *)&mctx.mctx32.es;
+ if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+ goto bad;
+
+ tstate32 = &mctx.mctx32.ss;
+
+ /* figure out where our new stack lives */
+ if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
+ (sigonstack)) {
+ ua_sp = ut->uu_sigstk.ss_sp;
+ stack_size = ut->uu_sigstk.ss_size;
+ ua_sp += stack_size;
+ ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+ } else {
+ ua_sp = tstate32->esp;
+ }
+ ua_cr2 = mctx.mctx32.es.faultvaddr;
+
+ /* No red zone in the 32-bit ABI; carve frame pieces directly. */
+ ua_sp -= sizeof (struct user_ucontext32);
+ ua_uctxp = ua_sp; // someone tramples the first word!
+
+ ua_sp -= sizeof (user32_siginfo_t);
+ ua_sip = ua_sp;
+
+ ua_sp -= sizeof (struct mcontext32);
+ ua_mctxp = ua_sp;
+
+ ua_sp -= sizeof (struct sigframe32);
+ ua_fp = ua_sp;
+
+ /*
+  * Align the frame and stack pointers to 16 bytes for SSE.
+  * (Note that we use 'fp' as the base of the stack going forward)
+  */
+ ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);
+
+ /*
+  * But we need to account for the return address so the alignment is
+  * truly "correct" at _sigtramp
+  */
+ ua_fp -= sizeof(frame32.retaddr);
+
+ /*
+  * Build the argument list for the signal handler.
+  * Handler should call sigreturn to get out of it
+  */
+ /* i386 convention: handler arguments are passed on the stack. */
+ frame32.retaddr = -1;
+ frame32.sigstyle = infostyle;
+ frame32.sig = sig;
+ frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
+ frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
+ frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
+
+ if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
+ goto bad;
+
+ /*
+  * Build the signal context to be used by sigreturn.
+  */
+ bzero(&uctx32, sizeof(uctx32));
+
+ uctx32.uc_onstack = oonstack;
+ uctx32.uc_sigmask = mask;
+ uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
+ uctx32.uc_stack.ss_size = stack_size;
+
+ if (oonstack)
+ uctx32.uc_stack.ss_flags |= SS_ONSTACK;
+ uctx32.uc_link = 0;
+
+ uctx32.uc_mcsize = sizeof(struct mcontext32);
+
+ uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);
+
+ if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
+ goto bad;
+
+ if (copyout((caddr_t)&mctx.mctx32, ua_mctxp, sizeof (struct mcontext32)))
+ goto bad;
+
+ /* Record the pre-signal sp/pc (register rewrite presumably follows
+  * later in the function, outside this chunk). */
+ sinfo64.pad[0] = tstate32->esp;
+ sinfo64.si_addr = tstate32->eip;
+ }
+
+ /*
+  * Translate the Mach exception code/subcode saved in the uthread
+  * (ut->uu_code / ut->uu_subcode) into a POSIX si_code for the handler.
+  */
+ switch (sig) {
+ case SIGILL:
+ switch (ut->uu_code) {
+ case EXC_I386_INVOP:
+ sinfo64.si_code = ILL_ILLOPC;
+ break;
+ default:
+ sinfo64.si_code = ILL_NOOP;
+ }
+ break;
+ case SIGFPE:
+ /* Bit positions in the x87/SSE status word subcode. */
+#define FP_IE 0 /* Invalid operation */
+#define FP_DE 1 /* Denormalized operand */
+#define FP_ZE 2 /* Zero divide */
+#define FP_OE 3 /* overflow */
+#define FP_UE 4 /* underflow */
+#define FP_PE 5 /* precision */
+ if (ut->uu_code == EXC_I386_DIV) {
+ sinfo64.si_code = FPE_INTDIV;
+ }
+ else if (ut->uu_code == EXC_I386_INTO) {
+ sinfo64.si_code = FPE_INTOVF;
+ }
+ else if (ut->uu_subcode & (1 << FP_ZE)) {
+ sinfo64.si_code = FPE_FLTDIV;
+ } else if (ut->uu_subcode & (1 << FP_OE)) {
+ sinfo64.si_code = FPE_FLTOVF;
+ } else if (ut->uu_subcode & (1 << FP_UE)) {
+ sinfo64.si_code = FPE_FLTUND;
+ } else if (ut->uu_subcode & (1 << FP_PE)) {
+ sinfo64.si_code = FPE_FLTRES;
+ } else if (ut->uu_subcode & (1 << FP_IE)) {
+ sinfo64.si_code = FPE_FLTINV;
+ } else {
+ sinfo64.si_code = FPE_NOOP;
+ }
+ break;
+ case SIGBUS:
+ sinfo64.si_code = BUS_ADRERR;
+ sinfo64.si_addr = ua_cr2;
+ break;
+ case SIGTRAP:
+ sinfo64.si_code = TRAP_BRKPT;
+ break;
+ case SIGSEGV:
+ sinfo64.si_addr = ua_cr2;
+
+ switch (ut->uu_code) {
+ case EXC_I386_GPFLT:
+ /* CR2 is meaningless after GP fault */
+ /* XXX namespace clash! */
+ sinfo64.si_addr = 0ULL;
+ sinfo64.si_code = 0;
+ break;
+ case KERN_PROTECTION_FAILURE:
+ sinfo64.si_code = SEGV_ACCERR;
+ break;
+ case KERN_INVALID_ADDRESS:
+ sinfo64.si_code = SEGV_MAPERR;
+ break;
+ default:
+ /* NOTE(review): FPE_NOOP as a SEGV default looks like a copy-paste
+  * oddity carried over from the SIGFPE case — confirm upstream. */
+ sinfo64.si_code = FPE_NOOP;
+ }
+ break;
+ default:
+ {
+ int status_and_exitcode;
+
+ /*
+  * All other signals need to fill out a minimum set of
+  * information for the siginfo structure passed into
+  * the signal handler, if SA_SIGINFO was specified.
+  *
+  * p->si_status actually contains both the status and
+  * the exit code; we save it off in its own variable
+  * for later breakdown.
+  */
+ proc_lock(p);
+ sinfo64.si_pid = p->si_pid;
+ p->si_pid =0;
+ status_and_exitcode = p->si_status;
+ p->si_status = 0;
+ sinfo64.si_uid = p->si_uid;
+ p->si_uid =0;
+ sinfo64.si_code = p->si_code;
+ p->si_code = 0;
+ proc_unlock(p);
+ if (sinfo64.si_code == CLD_EXITED) {
+ if (WIFEXITED(status_and_exitcode))
+ sinfo64.si_code = CLD_EXITED;
+ else if (WIFSIGNALED(status_and_exitcode)) {
+ if (WCOREDUMP(status_and_exitcode)) {
+ sinfo64.si_code = CLD_DUMPED;
+ status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
+ } else {
+ sinfo64.si_code = CLD_KILLED;
+ status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
+ }
+ }
+ }
+ /*
+  * The recorded status contains the exit code and the
+  * signal information, but the information to be passed
+  * in the siginfo to the handler is supposed to only
+  * contain the status, so we have to shift it out.
+  */
+ sinfo64.si_status = WEXITSTATUS(status_and_exitcode);
+ break;
+ }
+ }
+ /*
+  * Convert the internal siginfo to the process's ABI layout and copy it
+  * out to the user stack.  (Only the 64-bit branch is visible in this
+  * chunk; the 32-bit counterpart presumably follows after L363.)
+  */
+ if (proc_is64bit(p)) {
+ user64_siginfo_t sinfo64_user64;
+
+ bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));
+
+ siginfo_user_to_user64(&sinfo64,&sinfo64_user64);
+
+#if CONFIG_DTRACE
+ /* Stash a DTrace-visible copy of the siginfo in the uthread. */
+ bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
+
+ ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
+ ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
+ ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
+ ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
+ ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
+ /* XXX truncates faulting address to void * on K32 */
+ ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
+
+ /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
+ switch (sig) {
+ case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
+ DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
+ break;
+ default:
+ break;
+ }
+
+ /* XXX truncates catcher address to uintptr_t */
+ DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
+ void (*)(void), CAST_DOWN(sig_t, ua_catcher));
+#endif /* CONFIG_DTRACE */
+
+ if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64)))
+ goto bad;
+
+ flavor = x86_THREAD_STATE64;
+ state_count = x86_THREAD_STATE64_COUNT;
+ state = (void *)&mctx.mctx64.ss;