X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/e5568f75972dfc723778653c11cb6b4dc825716a..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/bsd/dev/i386/unix_signal.c

diff --git a/bsd/dev/i386/unix_signal.c b/bsd/dev/i386/unix_signal.c
index efd73bfb3..1c271607f 100644
--- a/bsd/dev/i386/unix_signal.c
+++ b/bsd/dev/i386/unix_signal.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * Copyright (c) 1992 NeXT, Inc.
@@ -31,40 +37,50 @@
 #include
 #include
-#include
+#include
 #include
-#include
+#include
 #include
-
+#include
+#include
+#include
+#include
+#include    /* for thread_abort_safely */
+#include
+
+#include
 #include
+#include
+#include
+#include
-#include
-#include
-
-
-/*
- * FIXME.. should be included from mach_kernel/i386/seg.h
- */
+#include
-#define USER_CS 0x17
-#define USER_DS 0x1f
-#define USER_CTHREAD 0x27
+#include
+#include
-#define UDATA_SEL USER_DS
-#define UCODE_SEL USER_CS
-#define UCTHREAD_SEL USER_CTHREAD
-#define valid_user_code_selector(x) (TRUE)
-#define valid_user_data_selector(x) (TRUE)
-#define valid_user_stack_selector(x) (TRUE)
-
-
-#define NULL_SEG 0
+/* Forward: */
+extern boolean_t machine_exception(int, mach_exception_code_t,
+    mach_exception_subcode_t, int *, mach_exception_subcode_t *);
+extern kern_return_t thread_getstatus(thread_t act, int flavor,
+    thread_state_t tstate, mach_msg_type_number_t *count);
+extern kern_return_t thread_setstatus(thread_t thread, int flavor,
+    thread_state_t tstate, mach_msg_type_number_t count);
 /* Signal handler flavors supported */
 /* These defns should match the Libc implmn */
 #define UC_TRAD 1
+#define UC_FLAVOR 30
+#define UC_SET_ALT_STACK 0x40000000
+#define UC_RESET_ALT_STACK 0x80000000
+
+#define C_32_STK_ALIGN 16
+#define C_64_STK_ALIGN 16
+#define C_64_REDZONE_LEN 128
+#define TRUNC_DOWN32(a,c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
+#define TRUNC_DOWN64(a,c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
 /*
  * Send an interrupt to process.
@@ -76,124 +92,560 @@
  * pointer, and the argument pointer, it returns
  * to the user specified pc, psl.
  */
+struct sigframe32 {
+    int             retaddr;
+    user32_addr_t   catcher;    /* sig_t */
+    int             sigstyle;
+    int             sig;
+    user32_addr_t   sinfo;      /* siginfo32_t* */
+    user32_addr_t   uctx;       /* struct ucontext32 */
+};
+
+/*
+ * Declare table of structure flavors and sizes for 64-bit and 32-bit processes
+ * for the cases of extended states (plain FP, or AVX):
+ */
+typedef struct {
+    int flavor; natural_t state_count; size_t mcontext_size;
+} xstate_info_t;
+static const xstate_info_t thread_state64[] = {
+    [FP]     = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64) },
+    [AVX]    = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64) },
+#if !defined(RC_HIDE_XNU_J137)
+    [AVX512] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) }
+#endif
+};
+static const xstate_info_t thread_state32[] = {
+    [FP]     = { x86_FLOAT_STATE32, x86_FLOAT_STATE32_COUNT, sizeof(struct mcontext32) },
+    [AVX]    = { x86_AVX_STATE32, x86_AVX_STATE32_COUNT, sizeof(struct mcontext_avx32) },
+#if !defined(RC_HIDE_XNU_J137)
+    [AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) }
+#endif
+};
+
+/*
+ * NOTE: Source and target may *NOT* overlap!
+ * XXX: Unify with bsd/kern/kern_exit.c
+ */
+static void
+siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out)
+{
+    out->si_signo = in->si_signo;
+    out->si_errno = in->si_errno;
+    out->si_code = in->si_code;
+    out->si_pid = in->si_pid;
+    out->si_uid = in->si_uid;
+    out->si_status = in->si_status;
+    out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr);
+    /* following cast works for sival_int because of padding */
+    out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr);
+    out->si_band = in->si_band;         /* range reduction */
+    out->__pad[0] = in->pad[0];         /* mcontext.ss.r1 */
+}
+
+static void
+siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out)
+{
+    out->si_signo = in->si_signo;
+    out->si_errno = in->si_errno;
+    out->si_code = in->si_code;
+    out->si_pid = in->si_pid;
+    out->si_uid = in->si_uid;
+    out->si_status = in->si_status;
+    out->si_addr = in->si_addr;
+    out->si_value.sival_ptr = in->si_value.sival_ptr;
+    out->si_band = in->si_band;         /* range reduction */
+    out->__pad[0] = in->pad[0];         /* mcontext.ss.r1 */
+}
 void
-sendsig(p, catcher, sig, mask, code)
-    struct proc *p;
-    sig_t catcher;
-    int sig, mask;
-    u_long code;
+sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
 {
-    struct sigframe {
-        int             retaddr;
-        sig_t           catcher;
-        int             sigstyle;
-        int             sig;
-        int             code;
-        struct sigcontext * scp;
-    } frame, *fp;
-    struct sigcontext context, *scp;
+    union {
+        struct mcontext_avx32 mctx_avx32;
+        struct mcontext_avx64 mctx_avx64;
+#if !defined(RC_HIDE_XNU_J137)
+        struct mcontext_avx512_32 mctx_avx512_32;
+        struct mcontext_avx512_64 mctx_avx512_64;
+#endif
+    } mctx_store, *mctxp = &mctx_store;
+
+    user_addr_t ua_sp;
+    user_addr_t ua_fp;
+    user_addr_t ua_cr2;
+    user_addr_t ua_sip;
+    user_addr_t ua_uctxp;
+    user_addr_t ua_mctxp;
+    user_siginfo_t sinfo64;
+
     struct sigacts *ps = p->p_sigacts;
-    int oonstack;
-    thread_t thread = current_thread();
-    thread_act_t th_act = current_act();
+    int oonstack, flavor;
+    user_addr_t trampact;
+    int sigonstack;
+    void * state;
+    mach_msg_type_number_t state_count;
+
+    thread_t thread;
     struct uthread * ut;
-    struct i386_saved_state * saved_state = (struct i386_saved_state *)
-        get_user_regs(th_act);
-    sig_t trampact;
-
-    ut = get_bsdthread_info(th_act);
-    oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;
-    if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
-        (ps->ps_sigonstack & sigmask(sig))) {
-        scp = ((struct sigcontext *)ps->ps_sigstk.ss_sp) - 1;
-        ps->ps_sigstk.ss_flags |= SA_ONSTACK;
-    } else
-        scp = ((struct sigcontext *)saved_state->uesp) - 1;
-    fp = ((struct sigframe *)scp) - 1;
-
-    /*
-     * Build the argument list for the signal handler.
-     */
+    int stack_size = 0;
+    int infostyle = UC_TRAD;
+    xstate_t sig_xstate;
+
+    thread = current_thread();
+    ut = get_bsdthread_info(thread);
+
+    if (p->p_sigacts->ps_siginfo & sigmask(sig))
+        infostyle = UC_FLAVOR;
+
+    oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
     trampact = ps->ps_trampact[sig];
-    /* Handler should call sigreturn to get out of it */
-    frame.retaddr = 0xffffffff;
-    frame.catcher = catcher;
-    frame.sigstyle = UC_TRAD;
-    frame.sig = sig;
-
-    if (sig == SIGILL || sig == SIGFPE) {
-        frame.code = code;
-    } else
-        frame.code = 0;
-    frame.scp = scp;
-    if (copyout((caddr_t)&frame, (caddr_t)fp, sizeof (frame)))
-        goto bad;
-
-#if PC_SUPPORT
-    {
-    PCcontext_t context = threadPCContext(thread);
-
-    if (context && context->running) {
-        oonstack |= 02;
-        context->running = FALSE;
-    }
-    }
-#endif
+    sigonstack = (ps->ps_sigonstack & sigmask(sig));
+
     /*
-     * Build the signal context to be used by sigreturn.
+     * init siginfo
      */
-    context.sc_onstack = oonstack;
-    context.sc_mask = mask;
-    context.sc_eax = saved_state->eax;
-    context.sc_ebx = saved_state->ebx;
-    context.sc_ecx = saved_state->ecx;
-    context.sc_edx = saved_state->edx;
-    context.sc_edi = saved_state->edi;
-    context.sc_esi = saved_state->esi;
-    context.sc_ebp = saved_state->ebp;
-    context.sc_esp = saved_state->uesp;
-    context.sc_ss = saved_state->ss;
-    context.sc_eflags = saved_state->efl;
-    context.sc_eip = saved_state->eip;
-    context.sc_cs = saved_state->cs;
-    if (saved_state->efl & EFL_VM) {
-        context.sc_ds = saved_state->v86_segs.v86_ds;
-        context.sc_es = saved_state->v86_segs.v86_es;
-        context.sc_fs = saved_state->v86_segs.v86_fs;
-        context.sc_gs = saved_state->v86_segs.v86_gs;
-
-        saved_state->efl &= ~EFL_VM;
+    proc_unlock(p);
+
+    bzero((caddr_t)&sinfo64, sizeof(sinfo64));
+    sinfo64.si_signo = sig;
+
+    bzero(mctxp, sizeof(*mctxp));
+
+    sig_xstate = current_xstate();
+
+    if (proc_is64bit(p)) {
+        x86_thread_state64_t *tstate64;
+        struct user_ucontext64 uctx64;
+
+        flavor = x86_THREAD_STATE64;
+        state_count = x86_THREAD_STATE64_COUNT;
+        state = (void *)&mctxp->mctx_avx64.ss;
+        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+            goto bad;
+
+        flavor = thread_state64[sig_xstate].flavor;
+        state_count = thread_state64[sig_xstate].state_count;
+        state = (void *)&mctxp->mctx_avx64.fs;
+        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+            goto bad;
+
+        flavor = x86_EXCEPTION_STATE64;
+        state_count = x86_EXCEPTION_STATE64_COUNT;
+        state = (void *)&mctxp->mctx_avx64.es;
+        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+            goto bad;
+
+        tstate64 = &mctxp->mctx_avx64.ss;
+
+        /* figure out where our new stack lives */
+        if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
+            (sigonstack)) {
+            ua_sp = ut->uu_sigstk.ss_sp;
+            stack_size = ut->uu_sigstk.ss_size;
+            ua_sp += stack_size;
+            ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+        } else {
+            ua_sp = tstate64->rsp;
+        }
+        ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;
+
+        /* The x86_64 ABI defines a 128-byte red zone. */
+        ua_sp -= C_64_REDZONE_LEN;
+
+        ua_sp -= sizeof (struct user_ucontext64);
+        ua_uctxp = ua_sp;        // someone tramples the first word!
+
+        ua_sp -= sizeof (user64_siginfo_t);
+        ua_sip = ua_sp;
+
+        ua_sp -= thread_state64[sig_xstate].mcontext_size;
+        ua_mctxp = ua_sp;
+
+        /*
+         * Align the frame and stack pointers to 16 bytes for SSE.
+         * (Note that we use 'ua_fp' as the base of the stack going forward)
+         */
+        ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);
+
+        /*
+         * But we need to account for the return address so the alignment is
+         * truly "correct" at _sigtramp
+         */
+        ua_fp -= sizeof(user_addr_t);
+
+        /*
+         * Build the signal context to be used by sigreturn.
+         */
+        bzero(&uctx64, sizeof(uctx64));
+
+        uctx64.uc_onstack = oonstack;
+        uctx64.uc_sigmask = mask;
+        uctx64.uc_stack.ss_sp = ua_fp;
+        uctx64.uc_stack.ss_size = stack_size;
+
+        if (oonstack)
+            uctx64.uc_stack.ss_flags |= SS_ONSTACK;
+        uctx64.uc_link = 0;
+
+        uctx64.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
+        uctx64.uc_mcontext64 = ua_mctxp;
+
+        if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
+            goto bad;
+
+        if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size))
+            goto bad;
+
+        sinfo64.pad[0] = tstate64->rsp;
+        sinfo64.si_addr = tstate64->rip;
+
+        tstate64->rip = trampact;
+        tstate64->rsp = ua_fp;
+        tstate64->rflags = get_eflags_exportmask();
+        /*
+         * JOE - might not need to set these
+         */
+        tstate64->cs = USER64_CS;
+        tstate64->fs = NULL_SEG;
+        tstate64->gs = USER_CTHREAD;
+
+        /*
+         * Build the argument list for the signal handler.
+         * Handler should call sigreturn to get out of it
+         */
+        tstate64->rdi = ua_catcher;
+        tstate64->rsi = infostyle;
+        tstate64->rdx = sig;
+        tstate64->rcx = ua_sip;
+        tstate64->r8  = ua_uctxp;
+
+    } else {
-        context.sc_ds = saved_state->ds;
-        context.sc_es = saved_state->es;
-        context.sc_fs = saved_state->fs;
-        context.sc_gs = saved_state->gs;
+        x86_thread_state32_t *tstate32;
+        struct user_ucontext32 uctx32;
+        struct sigframe32 frame32;
+
+        flavor = x86_THREAD_STATE32;
+        state_count = x86_THREAD_STATE32_COUNT;
+        state = (void *)&mctxp->mctx_avx32.ss;
+        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+            goto bad;
+
+        flavor = thread_state32[sig_xstate].flavor;
+        state_count = thread_state32[sig_xstate].state_count;
+        state = (void *)&mctxp->mctx_avx32.fs;
+        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+            goto bad;
+
+        flavor = x86_EXCEPTION_STATE32;
+        state_count = x86_EXCEPTION_STATE32_COUNT;
+        state = (void *)&mctxp->mctx_avx32.es;
+        if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
+            goto bad;
+
+        tstate32 = &mctxp->mctx_avx32.ss;
+
+        /* figure out where our new stack lives */
+        if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
+            (sigonstack)) {
+            ua_sp = ut->uu_sigstk.ss_sp;
+            stack_size = ut->uu_sigstk.ss_size;
+            ua_sp += stack_size;
+            ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+        } else {
+            ua_sp = tstate32->esp;
+        }
+        ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;
+
+        ua_sp -= sizeof (struct user_ucontext32);
+        ua_uctxp = ua_sp;        // someone tramples the first word!
+
+        ua_sp -= sizeof (user32_siginfo_t);
+        ua_sip = ua_sp;
+
+        ua_sp -= thread_state32[sig_xstate].mcontext_size;
+        ua_mctxp = ua_sp;
+
+        ua_sp -= sizeof (struct sigframe32);
+        ua_fp = ua_sp;
+
+        /*
+         * Align the frame and stack pointers to 16 bytes for SSE.
+         * (Note that we use 'fp' as the base of the stack going forward)
+         */
+        ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);
+
+        /*
+         * But we need to account for the return address so the alignment is
+         * truly "correct" at _sigtramp
+         */
+        ua_fp -= sizeof(frame32.retaddr);
+
+        /*
+         * Build the argument list for the signal handler.
+         * Handler should call sigreturn to get out of it
+         */
+        frame32.retaddr = -1;
+        frame32.sigstyle = infostyle;
+        frame32.sig = sig;
+        frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
+        frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
+        frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);
+
+        if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
+            goto bad;
+
+        /*
+         * Build the signal context to be used by sigreturn.
+         */
+        bzero(&uctx32, sizeof(uctx32));
+
+        uctx32.uc_onstack = oonstack;
+        uctx32.uc_sigmask = mask;
+        uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
+        uctx32.uc_stack.ss_size = stack_size;
+
+        if (oonstack)
+            uctx32.uc_stack.ss_flags |= SS_ONSTACK;
+        uctx32.uc_link = 0;
+
+        uctx32.uc_mcsize = thread_state64[sig_xstate].mcontext_size;
+
+        uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);
+
+        if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
+            goto bad;
+
+        if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size))
+            goto bad;
+
+        sinfo64.pad[0] = tstate32->esp;
+        sinfo64.si_addr = tstate32->eip;
+    }
+
+    switch (sig) {
+    case SIGILL:
+        switch (ut->uu_code) {
+        case EXC_I386_INVOP:
+            sinfo64.si_code = ILL_ILLOPC;
+            break;
+        default:
+            sinfo64.si_code = ILL_NOOP;
+        }
+        break;
+    case SIGFPE:
+#define FP_IE 0 /* Invalid operation */
+#define FP_DE 1 /* Denormalized operand */
+#define FP_ZE 2 /* Zero divide */
+#define FP_OE 3 /* overflow */
+#define FP_UE 4 /* underflow */
+#define FP_PE 5 /* precision */
+        if (ut->uu_code == EXC_I386_DIV) {
+            sinfo64.si_code = FPE_INTDIV;
+        }
+        else if (ut->uu_code == EXC_I386_INTO) {
+            sinfo64.si_code = FPE_INTOVF;
+        }
+        else if (ut->uu_subcode & (1 << FP_ZE)) {
+            sinfo64.si_code = FPE_FLTDIV;
+        } else if (ut->uu_subcode & (1 << FP_OE)) {
+            sinfo64.si_code = FPE_FLTOVF;
+        } else if (ut->uu_subcode & (1 << FP_UE)) {
+            sinfo64.si_code = FPE_FLTUND;
+        } else if (ut->uu_subcode & (1 << FP_PE)) {
+            sinfo64.si_code = FPE_FLTRES;
+        } else if (ut->uu_subcode & (1 << FP_IE)) {
+            sinfo64.si_code = FPE_FLTINV;
+        } else {
+            sinfo64.si_code = FPE_NOOP;
+        }
+        break;
+    case SIGBUS:
+        sinfo64.si_code = BUS_ADRERR;
+        sinfo64.si_addr = ua_cr2;
+        break;
+    case SIGTRAP:
+        sinfo64.si_code = TRAP_BRKPT;
+        break;
+    case SIGSEGV:
+        sinfo64.si_addr = ua_cr2;
+
+        switch (ut->uu_code) {
+        case EXC_I386_GPFLT:
+            /* CR2 is meaningless after GP fault */
+            /* XXX namespace clash! */
+            sinfo64.si_addr = 0ULL;
+            sinfo64.si_code = 0;
+            break;
+        case KERN_PROTECTION_FAILURE:
+            sinfo64.si_code = SEGV_ACCERR;
+            break;
+        case KERN_INVALID_ADDRESS:
+            sinfo64.si_code = SEGV_MAPERR;
+            break;
+        default:
+            sinfo64.si_code = FPE_NOOP;
+        }
+        break;
+    default:
+    {
+        int status_and_exitcode;
+
+        /*
+         * All other signals need to fill out a minimum set of
+         * information for the siginfo structure passed into
+         * the signal handler, if SA_SIGINFO was specified.
+         *
+         * p->si_status actually contains both the status and
+         * the exit code; we save it off in its own variable
+         * for later breakdown.
+         */
+        proc_lock(p);
+        sinfo64.si_pid = p->si_pid;
+        p->si_pid =0;
+        status_and_exitcode = p->si_status;
+        p->si_status = 0;
+        sinfo64.si_uid = p->si_uid;
+        p->si_uid =0;
+        sinfo64.si_code = p->si_code;
+        p->si_code = 0;
+        proc_unlock(p);
+        if (sinfo64.si_code == CLD_EXITED) {
+            if (WIFEXITED(status_and_exitcode))
+                sinfo64.si_code = CLD_EXITED;
+            else if (WIFSIGNALED(status_and_exitcode)) {
+                if (WCOREDUMP(status_and_exitcode)) {
+                    sinfo64.si_code = CLD_DUMPED;
+                    status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
+                } else {
+                    sinfo64.si_code = CLD_KILLED;
+                    status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
+                }
+            }
+        }
+        /*
+         * The recorded status contains the exit code and the
+         * signal information, but the information to be passed
+         * in the siginfo to the handler is supposed to only
+         * contain the status, so we have to shift it out.
+         */
+        sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
+        p->p_xhighbits = 0;
+        break;
+    }
     }
-    if (copyout((caddr_t)&context, (caddr_t)scp, sizeof (context)))
-        goto bad;
+    if (proc_is64bit(p)) {
+        user64_siginfo_t sinfo64_user64;
+
+        bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));
+
+        siginfo_user_to_user64_x86(&sinfo64,&sinfo64_user64);
+
+#if CONFIG_DTRACE
+        bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
+
+        ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
+        ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
+        ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
+        ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
+        ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
+        /* XXX truncates faulting address to void * on K32 */
+        ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
+
+        /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
+        switch (sig) {
+        case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
+            DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
+            break;
+        default:
+            break;
+        }
+
+        /* XXX truncates catcher address to uintptr_t */
+        DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
+            void (*)(void), CAST_DOWN(sig_t, ua_catcher));
+#endif /* CONFIG_DTRACE */
+
+        if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64)))
+            goto bad;
+
+        flavor = x86_THREAD_STATE64;
+        state_count = x86_THREAD_STATE64_COUNT;
+        state = (void *)&mctxp->mctx_avx64.ss;
+    } else {
+        x86_thread_state32_t *tstate32;
+        user32_siginfo_t sinfo32;
+
+        bzero((caddr_t)&sinfo32, sizeof(sinfo32));
+
+        siginfo_user_to_user32_x86(&sinfo64,&sinfo32);
+
+#if CONFIG_DTRACE
+        bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
+
+        ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
+        ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
+        ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
+        ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
+        ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
+        ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);
+
+        /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
+        switch (sig) {
+        case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
+            DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
+            break;
+        default:
+            break;
+        }
+
+        DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
+            void (*)(void), CAST_DOWN(sig_t, ua_catcher));
+#endif /* CONFIG_DTRACE */
+
+        if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
+            goto bad;
+
+        tstate32 = &mctxp->mctx_avx32.ss;
-    saved_state->eip = (unsigned int)trampact;
-    saved_state->cs = UCODE_SEL;
+        tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
+        tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
-    saved_state->uesp = (unsigned int)fp;
-    saved_state->ss = UDATA_SEL;
+        tstate32->eflags = get_eflags_exportmask();
+
+        tstate32->cs = USER_CS;
+        tstate32->ss = USER_DS;
+        tstate32->ds = USER_DS;
+        tstate32->es = USER_DS;
+        tstate32->fs = NULL_SEG;
+        tstate32->gs = USER_CTHREAD;
+
+        flavor = x86_THREAD_STATE32;
+        state_count = x86_THREAD_STATE32_COUNT;
+        state = (void *)tstate32;
+    }
+    if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
+        goto bad;
+    ml_fp_setvalid(FALSE);
+
+    /* Tell the PAL layer about the signal */
+    pal_set_signal_delivery( thread );
+
+    proc_lock(p);
-    saved_state->ds = UDATA_SEL;
-    saved_state->es = UDATA_SEL;
-    saved_state->fs = NULL_SEG;
-    saved_state->gs = USER_CTHREAD;
     return;
 bad:
+
+    proc_lock(p);
     SIGACTION(p, SIGILL) = SIG_DFL;
     sig = sigmask(SIGILL);
     p->p_sigignore &= ~sig;
     p->p_sigcatch &= ~sig;
     ut->uu_sigmask &= ~sig;
     /* sendsig is called with signal lock held */
-    psignal_lock(p, SIGILL, 0);
+    proc_unlock(p);
+    psignal_locked(p, SIGILL);
+    proc_lock(p);
     return;
 }
@@ -207,87 +659,120 @@ bad:
  * psl to gain improper priviledges or to cause
  * a machine fault.
  */
-struct sigreturn_args {
-    struct sigcontext *sigcntxp;
-};
-/* ARGSUSED */
+
 int
-sigreturn(p, uap, retval)
-    struct proc *p;
-    struct sigreturn_args *uap;
-    int *retval;
+sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
 {
-    struct sigcontext context;
-    thread_t thread = current_thread();
-    thread_act_t th_act = current_act();
-    int error;
-    struct i386_saved_state* saved_state = (struct i386_saved_state*)
        get_user_regs(th_act);
+    union {
+        struct mcontext_avx32 mctx_avx32;
+        struct mcontext_avx64 mctx_avx64;
+#if !defined(RC_HIDE_XNU_J137)
+        struct mcontext_avx512_32 mctx_avx512_32;
+        struct mcontext_avx512_64 mctx_avx512_64;
+#endif
+    } mctx_store, *mctxp = &mctx_store;
+
+    thread_t thread = current_thread();
     struct uthread * ut;
+    int error;
+    int onstack = 0;
+
+    mach_msg_type_number_t ts_count;
+    unsigned int ts_flavor;
+    void * ts;
+    mach_msg_type_number_t fs_count;
+    unsigned int fs_flavor;
+    void * fs;
+    int rval = EJUSTRETURN;
+    xstate_t sig_xstate;
+
+    ut = (struct uthread *)get_bsdthread_info(thread);
+
+    /*
+     * If we are being asked to change the altstack flag on the thread, we
+     * just set/reset it and return (the uap->uctx is not used).
+     */
+    if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
+        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
+        return (0);
+    } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
+        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
+        return (0);
+    }
+
+    bzero(mctxp, sizeof(*mctxp));
+
+    sig_xstate = current_xstate();
+
+    if (proc_is64bit(p)) {
+        struct user_ucontext64 uctx64;
+
+        if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
+            return(error);
+        if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size)))
+            return(error);
-
-    if (saved_state == NULL)
-        return EINVAL;
+        onstack = uctx64.uc_onstack & 01;
+        ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;
-    if (error = copyin((caddr_t)uap->sigcntxp, (caddr_t)&context,
-                        sizeof (context)))
-        return(error);
-    ut = (struct uthread *)get_bsdthread_info(th_act);
+        ts_flavor = x86_THREAD_STATE64;
+        ts_count  = x86_THREAD_STATE64_COUNT;
+        ts = (void *)&mctxp->mctx_avx64.ss;
-    if (context.sc_onstack & 01)
-        p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+        fs_flavor = thread_state64[sig_xstate].flavor;
+        fs_count  = thread_state64[sig_xstate].state_count;
+        fs = (void *)&mctxp->mctx_avx64.fs;
+
+    } else {
+        struct user_ucontext32 uctx32;
+
+        if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
+            return(error);
+
+        if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size)))
+            return(error);
+
+        onstack = uctx32.uc_onstack & 01;
+        ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;
+
+        ts_flavor = x86_THREAD_STATE32;
+        ts_count  = x86_THREAD_STATE32_COUNT;
+        ts = (void *)&mctxp->mctx_avx32.ss;
+
+        fs_flavor = thread_state32[sig_xstate].flavor;
+        fs_count  = thread_state32[sig_xstate].state_count;
+        fs = (void *)&mctxp->mctx_avx32.fs;
+    }
+
+    if (onstack)
+        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
     else
-        p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
-    ut->uu_sigmask = context.sc_mask &~ sigcantmask;
-    if(ut->uu_siglist & ~ut->uu_sigmask)
-        signal_setast(current_act());
-    saved_state->eax = context.sc_eax;
-    saved_state->ebx = context.sc_ebx;
-    saved_state->ecx = context.sc_ecx;
-    saved_state->edx = context.sc_edx;
-    saved_state->edi = context.sc_edi;
-    saved_state->esi = context.sc_esi;
-    saved_state->ebp = context.sc_ebp;
-    saved_state->uesp = context.sc_esp;
-    saved_state->ss = context.sc_ss;
-    saved_state->efl = context.sc_eflags;
-    saved_state->efl &= ~EFL_USERCLR;
-    saved_state->efl |= EFL_USERSET;
-    saved_state->eip = context.sc_eip;
-    saved_state->cs = context.sc_cs;
-
-    if (context.sc_eflags & EFL_VM) {
-        saved_state->ds = NULL_SEG;
-        saved_state->es = NULL_SEG;
-        saved_state->fs = NULL_SEG;
-        saved_state->gs = NULL_SEG;
-        saved_state->v86_segs.v86_ds = context.sc_ds;
-        saved_state->v86_segs.v86_es = context.sc_es;
-        saved_state->v86_segs.v86_fs = context.sc_fs;
-        saved_state->v86_segs.v86_gs = context.sc_gs;
-
-        saved_state->efl |= EFL_VM;
-    }
-    else {
-        saved_state->ds = context.sc_ds;
-        saved_state->es = context.sc_es;
-        saved_state->fs = context.sc_fs;
-        saved_state->gs = context.sc_gs;
-    }
-
-#if PC_SUPPORT
-    if (context.sc_onstack & 02) {
-        PCcontext_t context = threadPCContext(thread);
+        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
+
+    if (ut->uu_siglist & ~ut->uu_sigmask)
+        signal_setast(thread);
+    /*
+     * thread_set_state() does all the needed checks for the passed in
+     * content
+     */
+    if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
+        rval = EINVAL;
+        goto error_ret;
+    }
-        if (context)
-            context->running = TRUE;
-    }
-#endif
+    ml_fp_setvalid(TRUE);
+
+    if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
+        rval = EINVAL;
+        goto error_ret;
-    return (EJUSTRETURN);
+    }
+error_ret:
+    return rval;
 }
+
 /*
 * machine_exception() performs MD translation
 * of a mach exception to a unix signal and code.
@@ -295,29 +780,49 @@ sigreturn(p, uap, retval)
 boolean_t
 machine_exception(
-    int             exception,
-    int             code,
-    int             subcode,
-    int             *unix_signal,
-    int             *unix_code
-)
+    int                       exception,
+    mach_exception_code_t     code,
+    __unused mach_exception_subcode_t subcode,
+    int                       *unix_signal,
+    mach_exception_code_t     *unix_code)
 {
-    switch(exception) {
-
-    case EXC_BAD_INSTRUCTION:
-        *unix_signal = SIGILL;
-        *unix_code = code;
-        break;
-
-    case EXC_ARITHMETIC:
-        *unix_signal = SIGFPE;
-        *unix_code = code;
-        break;
-
-    default:
-        return(FALSE);
-    }
+    switch(exception) {
+
+    case EXC_BAD_ACCESS:
+        /* Map GP fault to SIGSEGV, otherwise defer to caller */
+        if (code == EXC_I386_GPFLT) {
+            *unix_signal = SIGSEGV;
+            *unix_code = code;
+            break;
+        }
+        return(FALSE);
+
+    case EXC_BAD_INSTRUCTION:
+        *unix_signal = SIGILL;
+        *unix_code = code;
+        break;
+
+    case EXC_ARITHMETIC:
+        *unix_signal = SIGFPE;
+        *unix_code = code;
+        break;
+
+    case EXC_SOFTWARE:
+        if (code == EXC_I386_BOUND) {
+            /*
+             * Map #BR, the Bound Range Exceeded exception, to
+             * SIGTRAP.
+             */
+            *unix_signal = SIGTRAP;
+            *unix_code = code;
+            break;
+        }
+
+    default:
+        return(FALSE);
+    }
-    return(TRUE);
+    return(TRUE);
 }
+
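Editor's note: the 64-bit path of the new sendsig() carves the user stack into a red zone skip, a user_ucontext64, a user64_siginfo_t, and the saved mcontext, then 16-byte aligns the result and leaves room for the fake return address before jumping to the trampoline with the handler arguments in rdi (catcher), rsi (infostyle), rdx (sig), rcx (siginfo) and r8 (ucontext). The following is a minimal user-space sketch of that layout arithmetic only; the helper name sig_frame_layout64() is hypothetical, and the structure sizes passed in main() are illustrative stand-ins, not the real kernel sizes.

/* Sketch of the 64-bit signal-frame layout performed by sendsig(). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define REDZONE_LEN 128   /* mirrors C_64_REDZONE_LEN: x86_64 ABI red zone */
#define STK_ALIGN    16   /* mirrors C_64_STK_ALIGN: SSE alignment */

/* Mirrors TRUNC_DOWN64(a,c): round 'a' down to a multiple of 'c'. */
static uint64_t trunc_down64(uint64_t a, uint64_t c)
{
    return (a - c) & (uint64_t)(-(int64_t)c);
}

struct sig_frame64 {
    uint64_t ucontext;   /* ua_uctxp: struct user_ucontext64 */
    uint64_t siginfo;    /* ua_sip:   user64_siginfo_t */
    uint64_t mcontext;   /* ua_mctxp: saved thread/FP/AVX state */
    uint64_t rsp;        /* ua_fp: stack pointer handed to _sigtramp */
};

static struct sig_frame64
sig_frame_layout64(uint64_t user_rsp, size_t ucsize, size_t sisize, size_t mcsize)
{
    struct sig_frame64 f;
    uint64_t sp = user_rsp - REDZONE_LEN;    /* skip the red zone */

    sp -= ucsize;  f.ucontext = sp;
    sp -= sisize;  f.siginfo  = sp;
    sp -= mcsize;  f.mcontext = sp;

    /* 16-byte align, then subtract the return-address slot so the
     * alignment is "correct" at _sigtramp, as in the diff. */
    f.rsp = trunc_down64(sp, STK_ALIGN) - sizeof(uint64_t);
    return f;
}

int main(void)
{
    /* Example run with stand-in structure sizes. */
    struct sig_frame64 f = sig_frame_layout64(0x7fff5fbff000ULL, 128, 104, 840);
    printf("uctx=%#llx sip=%#llx mctx=%#llx rsp=%#llx\n",
           (unsigned long long)f.ucontext, (unsigned long long)f.siginfo,
           (unsigned long long)f.mcontext, (unsigned long long)f.rsp);
    return 0;
}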
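Editor's note: the diff's thread_state64[]/thread_state32[] tables use C99 designated initializers indexed by the extended-state kind (FP, AVX, AVX512) so that one lookup yields the thread_getstatus()/thread_setstatus() flavor, its state count, and the mcontext size to copy out. The sketch below shows only that table-lookup pattern; the enum values, flavor numbers and sizes are stand-ins, and in the kernel the index comes from current_xstate().

/* Sketch of the xstate_info_t lookup-table pattern used in the diff. */
#include <stddef.h>
#include <stdio.h>

typedef enum { FP, AVX, AVX512 } xstate_t;

typedef struct {
    int      flavor;          /* thread state flavor to request/restore */
    unsigned state_count;     /* count passed to thread_getstatus() */
    size_t   mcontext_size;   /* bytes of mcontext copied to user space */
} xstate_info_t;

/* Stand-in numbers; the real table uses x86_FLOAT/AVX/AVX512 constants. */
static const xstate_info_t thread_state64_demo[] = {
    [FP]     = { 5,   64,  600 },
    [AVX]    = { 17,  96,  856 },
    [AVX512] = { 20, 160, 1368 },
};

int main(void)
{
    xstate_t cur = AVX;   /* in the kernel: cur = current_xstate(); */
    const xstate_info_t *xi = &thread_state64_demo[cur];
    printf("flavor=%d count=%u mcontext=%zu bytes\n",
           xi->flavor, xi->state_count, xi->mcontext_size);
    return 0;
}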