X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/593a1d5fd87cdf5b46dd5fcb84467b432cea0f91..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/bsd/kern/kern_sig.c?ds=sidebyside diff --git a/bsd/kern/kern_sig.c b/bsd/kern/kern_sig.c index b9b14ffb5..bf5507903 100644 --- a/bsd/kern/kern_sig.c +++ b/bsd/kern/kern_sig.c @@ -92,7 +92,7 @@ #include #include -#include +#include #include @@ -101,7 +101,6 @@ #include #include /* for coredump */ #include /* for APC support */ -#include #include /* extern void *get_bsdtask_info(task_t); */ #include #include @@ -112,6 +111,7 @@ #include #include +#include /* * Missing prototypes that Mach should export @@ -121,7 +121,6 @@ extern int thread_enable_fpe(thread_t act, int onoff); extern thread_t port_name_to_thread(mach_port_name_t port_name); extern kern_return_t get_signalact(task_t , thread_t *, int); -extern boolean_t thread_should_abort(thread_t); extern unsigned int get_useraddr(void); /* @@ -134,10 +133,11 @@ extern void doexception(int exc, mach_exception_code_t code, static void stop(proc_t, proc_t); int cansignal(proc_t, kauth_cred_t, proc_t, int, int); int killpg1(proc_t, int, int, int, int); -int setsigvec(proc_t, int, struct __user_sigaction *); static void psignal_uthread(thread_t, int); +static void psignal_try_thread(proc_t, thread_t, int signum); kern_return_t do_bsdexception(int, int, int); void __posix_sem_syscall_return(kern_return_t); +char *proc_name_address(void *p); /* implementations in osfmk/kern/sync_sema.c. We do not want port.h in this scope, so void * them */ kern_return_t semaphore_timedwait_signal_trap_internal(mach_port_name_t, mach_port_name_t, unsigned int, clock_res_t, void (*)(kern_return_t)); @@ -148,9 +148,15 @@ kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_retur static int filt_sigattach(struct knote *kn); static void filt_sigdetach(struct knote *kn); static int filt_signal(struct knote *kn, long hint); - -struct filterops sig_filtops = - { 0, filt_sigattach, filt_sigdetach, filt_signal }; +static void filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev, + long type); + +struct filterops sig_filtops = { + .f_attach = filt_sigattach, + .f_detach = filt_sigdetach, + .f_event = filt_signal, + .f_touch = filt_signaltouch, +}; /* structures and fns for killpg1 iterartion callback and filters */ struct killpg1_filtargs { @@ -163,6 +169,7 @@ struct killpg1_iterargs { kauth_cred_t uc; int signum; int * nfoundp; + int zombie; }; static int killpg1_filt(proc_t p, void * arg); @@ -178,6 +185,7 @@ static kern_return_t get_signalthread(proc_t, int, thread_t *); #define PSIG_LOCKED 0x1 #define PSIG_VFORK 0x2 #define PSIG_THREAD 0x4 +#define PSIG_TRY_THREAD 0x8 static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum); @@ -186,9 +194,17 @@ static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, * NOTE: Source and target may *NOT* overlap! 
(target is smaller) */ static void -sigaltstack_64to32(struct user_sigaltstack *in, struct sigaltstack *out) +sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out) +{ + out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp); + out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size); + out->ss_flags = in->ss_flags; +} + +static void +sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out) { - out->ss_sp = CAST_DOWN(void *,in->ss_sp); + out->ss_sp = in->ss_sp; out->ss_size = in->ss_size; out->ss_flags = in->ss_flags; } @@ -199,24 +215,39 @@ sigaltstack_64to32(struct user_sigaltstack *in, struct sigaltstack *out) * the beginning. */ static void -sigaltstack_32to64(struct sigaltstack *in, struct user_sigaltstack *out) +sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out) +{ + out->ss_flags = in->ss_flags; + out->ss_size = in->ss_size; + out->ss_sp = CAST_USER_ADDR_T(in->ss_sp); +} +static void +sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out) { out->ss_flags = in->ss_flags; out->ss_size = in->ss_size; - out->ss_sp = CAST_USER_ADDR_T(in->ss_sp); + out->ss_sp = in->ss_sp; } static void -sigaction_64to32(struct user_sigaction *in, struct sigaction *out) +sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out) { /* This assumes 32 bit __sa_handler is of type sig_t */ - out->__sigaction_u.__sa_handler = CAST_DOWN(sig_t,in->__sigaction_u.__sa_handler); + out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t,in->__sigaction_u.__sa_handler); + out->sa_mask = in->sa_mask; + out->sa_flags = in->sa_flags; +} +static void +sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out) +{ + /* This assumes 32 bit __sa_handler is of type sig_t */ + out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler; out->sa_mask = in->sa_mask; out->sa_flags = in->sa_flags; } static void -__sigaction_32to64(struct __sigaction *in, struct __user_sigaction *out) +__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out) { out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler); out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp); @@ -224,6 +255,14 @@ __sigaction_32to64(struct __sigaction *in, struct __user_sigaction *out) out->sa_flags = in->sa_flags; } +static void +__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out) +{ + out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler; + out->sa_tramp = in->sa_tramp; + out->sa_mask = in->sa_mask; + out->sa_flags = in->sa_flags; +} #if SIGNAL_DEBUG void ram_printf(int); @@ -267,6 +306,10 @@ cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie) if (p == q) return(1); + /* you can't send launchd SIGKILL, even if root */ + if (signum == SIGKILL && q == initproc) + return(0); + if (!suser(uc, NULL)) return (1); /* root can always signal */ @@ -287,8 +330,8 @@ cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie) proc_list_unlock(); /* - * If the real or effective UID of the sender matches the real, - * effective, or ssaved UID of the target, permit the signal to + * If the real or effective UID of the sender matches the real + * or saved UID of the target, permit the signal to * be sent. 
*/ if (zombie == 0) @@ -296,12 +339,10 @@ cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie) else my_cred = proc_ucred(q); - if (uc->cr_ruid == my_cred->cr_ruid || - uc->cr_ruid == my_cred->cr_svuid || - kauth_cred_getuid(uc) == my_cred->cr_ruid || - kauth_cred_getuid(uc) == my_cred->cr_svuid || - uc->cr_ruid == kauth_cred_getuid(my_cred) || - kauth_cred_getuid(uc) == kauth_cred_getuid(my_cred)) { + if (kauth_cred_getruid(uc) == kauth_cred_getruid(my_cred) || + kauth_cred_getruid(uc) == kauth_cred_getsvuid(my_cred) || + kauth_cred_getuid(uc) == kauth_cred_getruid(my_cred) || + kauth_cred_getuid(uc) == kauth_cred_getsvuid(my_cred)) { if (zombie == 0) kauth_cred_unref(&my_cred); return (1); @@ -313,21 +354,72 @@ cansignal(proc_t p, kauth_cred_t uc, proc_t q, int signum, int zombie) return (0); } +/* + * Some signals can be restricted from being handled, + * forcing the default action for that signal. This behavior applies only to + * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x" + * bootarg: + * + * 0 (default): Disallow use of restricted signals. Trying to register a handler + * returns ENOTSUP, which userspace may use to take special action (e.g. abort). + * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL. + * 2: Usual POSIX semantics. + */ +unsigned sigrestrict_arg = 0; + +#if PLATFORM_WatchOS +static int +sigrestrictmask(void) +{ + if (kauth_getuid() != 0 && sigrestrict_arg != 2) { + return SIGRESTRICTMASK; + } + return 0; +} + +static int +signal_is_restricted(proc_t p, int signum) +{ + if (sigmask(signum) & sigrestrictmask()) { + if (sigrestrict_arg == 0 && + task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) { + return ENOTSUP; + } else { + return EINVAL; + } + } + return 0; +} + +#else + +static inline int +signal_is_restricted(proc_t p, int signum) +{ + (void)p; + (void)signum; + return 0; +} +#endif /* !PLATFORM_WatchOS */ /* * Returns: 0 Success * EINVAL * copyout:EFAULT * copyin:EFAULT + * + * Notes: Uses current thread as a parameter to inform PPC to enable + * FPU exceptions via setsigvec(); this operation is not proxy + * safe! 
*/ /* ARGSUSED */ int -sigaction(proc_t p, struct sigaction_args *uap, __unused register_t *retval) +sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval) { - struct user_sigaction vec; - struct __user_sigaction __vec; + struct kern_sigaction vec; + struct __kern_sigaction __vec; - struct user_sigaction *sa = &vec; + struct kern_sigaction *sa = &vec; struct sigacts *ps = p->p_sigacts; int signum; @@ -335,9 +427,34 @@ sigaction(proc_t p, struct sigaction_args *uap, __unused register_t *retval) signum = uap->signum; if (signum <= 0 || signum >= NSIG || - signum == SIGKILL || signum == SIGSTOP) + signum == SIGKILL || signum == SIGSTOP) return (EINVAL); + if (uap->nsa) { + if (IS_64BIT_PROCESS(p)) { + struct __user64_sigaction __vec64; + error = copyin(uap->nsa, &__vec64, sizeof(__vec64)); + __sigaction_user64_to_kern(&__vec64, &__vec); + } else { + struct __user32_sigaction __vec32; + error = copyin(uap->nsa, &__vec32, sizeof(__vec32)); + __sigaction_user32_to_kern(&__vec32, &__vec); + } + if (error) + return (error); + __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */ + + if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) { + if ((error = signal_is_restricted(p, signum))) { + if (error == ENOTSUP) { + printf("%s(%d): denied attempt to register action for signal %d\n", + proc_name_address(p), proc_pid(p), signum); + } + return error; + } + } + } + if (uap->osa) { sa->sa_handler = ps->ps_sigact[signum]; sa->sa_mask = ps->ps_catchmask[signum]; @@ -359,40 +476,35 @@ sigaction(proc_t p, struct sigaction_args *uap, __unused register_t *retval) sa->sa_flags |= SA_NOCLDWAIT; if (IS_64BIT_PROCESS(p)) { - error = copyout(sa, uap->osa, sizeof(struct user_sigaction)); + struct user64_sigaction vec64; + sigaction_kern_to_user64(sa, &vec64); + error = copyout(&vec64, uap->osa, sizeof(vec64)); } else { - struct sigaction vec32; - sigaction_64to32(sa, &vec32); - error = copyout(&vec32, uap->osa, sizeof(struct sigaction)); + struct user32_sigaction vec32; + sigaction_kern_to_user32(sa, &vec32); + error = copyout(&vec32, uap->osa, sizeof(vec32)); } if (error) return (error); } + if (uap->nsa) { - if (IS_64BIT_PROCESS(p)) { - error = copyin(uap->nsa, &__vec, sizeof(struct __user_sigaction)); - } else { - struct __sigaction __vec32; - error = copyin(uap->nsa, &__vec32, sizeof(struct __sigaction)); - __sigaction_32to64(&__vec32, &__vec); - } - if (error) - return (error); - __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */ - error = setsigvec(p, signum, &__vec); + error = setsigvec(p, current_thread(), signum, &__vec, FALSE); } + return (error); } /* Routines to manipulate bits on all threads */ int -clear_procsiglist(proc_t p, int bit) +clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart) { struct uthread * uth; thread_t thact; proc_lock(p); - proc_signalstart(p, 1); + if (!in_signalstart) + proc_signalstart(p, 1); if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { thact = p->p_vforkact; @@ -400,7 +512,8 @@ clear_procsiglist(proc_t p, int bit) if (uth) { uth->uu_siglist &= ~bit; } - proc_signalend(p, 1); + if (!in_signalstart) + proc_signalend(p, 1); proc_unlock(p); return(0); } @@ -408,8 +521,9 @@ clear_procsiglist(proc_t p, int bit) TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { uth->uu_siglist &= ~bit; } - - proc_signalend(p, 1); + p->p_siglist &= ~bit; + if (!in_signalstart) + proc_signalend(p, 1); proc_unlock(p); return(0); @@ -507,8 +621,18 @@ set_procsigmask(proc_t p, int bit) } /* XXX should be static? 
*/ +/* + * Notes: The thread parameter is used in the PPC case to select the + * thread on which the floating point exception will be enabled + * or disabled. We can't simply take current_thread(), since + * this is called from posix_spawn() on the not currently running + * process/thread pair. + * + * We mark thread as unused to alow compilation without warning + * on non-PPC platforms. + */ int -setsigvec(proc_t p, int signum, struct __user_sigaction *sa) +setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart) { struct sigacts *ps = p->p_sigacts; int bit; @@ -553,23 +677,15 @@ setsigvec(proc_t p, int signum, struct __user_sigaction *sa) ps->ps_signodefer &= ~bit; if (signum == SIGCHLD) { if (sa->sa_flags & SA_NOCLDSTOP) - OSBitOrAtomic(P_NOCLDSTOP, (UInt32 *)&p->p_flag); + OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag); else - OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), (UInt32 *)&p->p_flag); + OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag); if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) - OSBitOrAtomic(P_NOCLDWAIT, (UInt32 *)&p->p_flag); + OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag); else - OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), (UInt32 *)&p->p_flag); + OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag); } -#ifdef __ppc__ - if (signum == SIGFPE) { - if (sa->sa_handler == SIG_DFL || sa->sa_handler == SIG_IGN) - thread_enable_fpe(current_thread(), 0); - else - thread_enable_fpe(current_thread(), 1); - } -#endif /* __ppc__ */ /* * Set bit in p_sigignore for signals that are set to SIG_IGN, * and for signals set to SIG_DFL where the default is to ignore. @@ -579,7 +695,7 @@ setsigvec(proc_t p, int signum, struct __user_sigaction *sa) if (sa->sa_handler == SIG_IGN || (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) { - clear_procsiglist(p, bit); + clear_procsiglist(p, bit, in_sigstart); if (signum != SIGCONT) p->p_sigignore |= bit; /* easier in psignal */ p->p_sigcatch &= ~bit; @@ -602,7 +718,7 @@ siginit(proc_t p) { int i; - for (i = 0; i < NSIG; i++) + for (i = 1; i < NSIG; i++) if (sigprop[i] & SA_IGNORE && i != SIGCONT) p->p_sigignore |= sigmask(i); } @@ -618,6 +734,19 @@ execsigs(proc_t p, thread_t thread) struct uthread *ut; ut = (struct uthread *)get_bsdthread_info(thread); + + /* + * transfer saved signal states from the process + * back to the current thread. + * + * NOTE: We do this without the process locked, + * because we are guaranteed to be single-threaded + * by this point in exec and the p_siglist is + * only accessed by threads inside the process. + */ + ut->uu_siglist |= p->p_siglist; + p->p_siglist = 0; + /* * Reset caught signals. Held signals remain held * through p_sigmask (unless they were caught, @@ -630,13 +759,11 @@ execsigs(proc_t p, thread_t thread) if (sigprop[nc] & SA_IGNORE) { if (nc != SIGCONT) p->p_sigignore |= mask; - if (thread){ - ut->uu_siglist &= ~mask; - } else - clear_procsiglist(p, mask); + ut->uu_siglist &= ~mask; } ps->ps_sigact[nc] = SIG_DFL; } + /* * Reset stack state to the user stack. * Clear set of signals caught on the signal stack. @@ -657,7 +784,7 @@ execsigs(proc_t p, thread_t thread) * the library stub does the rest. 
*/ int -sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused register_t *retval) +sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval) { int error = 0; sigset_t oldmask, nmask; @@ -702,7 +829,7 @@ out: } int -sigpending(__unused proc_t p, struct sigpending_args *uap, __unused register_t *retval) +sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval) { struct uthread *ut; sigset_t pendlist; @@ -729,14 +856,14 @@ sigcontinue(__unused int error) } int -sigsuspend(proc_t p, struct sigsuspend_args *uap, register_t *retval) +sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval) { __pthread_testcancel(1); return(sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval)); } int -sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused register_t *retval) +sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval) { struct uthread *ut; @@ -761,7 +888,7 @@ sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused reg int __disable_threadsignal(__unused proc_t p, __unused struct __disable_threadsignal_args *uap, - __unused register_t *retval) + __unused int32_t *retval) { struct uthread *uth; @@ -799,7 +926,7 @@ __pthread_testcancel(int presyscall) int __pthread_markcancel(__unused proc_t p, - struct __pthread_markcancel_args *uap, __unused register_t *retval) + struct __pthread_markcancel_args *uap, __unused int32_t *retval) { thread_act_t target_act; int error = 0; @@ -831,7 +958,7 @@ __pthread_markcancel(__unused proc_t p, */ int __pthread_canceled(__unused proc_t p, - struct __pthread_canceled_args *uap, __unused register_t *retval) + struct __pthread_canceled_args *uap, __unused int32_t *retval) { thread_act_t thread; struct uthread *uth; @@ -877,65 +1004,184 @@ __posix_sem_syscall_return(kern_return_t kern_result) /* does not return */ } +#if OLD_SEMWAIT_SIGNAL /* * Returns: 0 Success * EINTR * ETIMEDOUT * EINVAL + * EFAULT if timespec is NULL */ int -__semwait_signal(__unused proc_t p, struct __semwait_signal_args *uap, - register_t *retval) +__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap, + int32_t *retval) { __pthread_testcancel(0); - return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval)); + return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval)); } int -__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap, - __unused register_t *retval) +__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap, + __unused int32_t *retval) { - + kern_return_t kern_result; + int error; mach_timespec_t then; struct timespec now; - + struct user_timespec ts; + boolean_t truncated_timeout = FALSE; + if(uap->timeout) { - + + if (IS_64BIT_PROCESS(p)) { + struct user64_timespec ts64; + error = copyin(uap->ts, &ts64, sizeof(ts64)); + ts.tv_sec = ts64.tv_sec; + ts.tv_nsec = ts64.tv_nsec; + } else { + struct user32_timespec ts32; + error = copyin(uap->ts, &ts32, sizeof(ts32)); + ts.tv_sec = ts32.tv_sec; + ts.tv_nsec = ts32.tv_nsec; + } + + if (error) { + return error; + } + + if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) { + ts.tv_sec = 0xFFFFFFFF; + ts.tv_nsec = 0; + truncated_timeout = TRUE; + } + if (uap->relative) { - then.tv_sec = uap->tv_sec; - then.tv_nsec = uap->tv_nsec; + then.tv_sec = ts.tv_sec; + then.tv_nsec = ts.tv_nsec; } else { nanotime(&now); - then.tv_sec = uap->tv_sec - 
now.tv_sec; - then.tv_nsec = uap->tv_nsec - now.tv_nsec; - if (then.tv_nsec < 0) { - then.tv_nsec += NSEC_PER_SEC; - then.tv_sec--; - } + /* if time has elapsed, set time to null timepsec to bailout rightaway */ - if ((int)then.tv_sec < 0) { + if (now.tv_sec == ts.tv_sec ? + now.tv_nsec > ts.tv_nsec : + now.tv_sec > ts.tv_sec) { then.tv_sec = 0; then.tv_nsec = 0; + } else { + then.tv_sec = ts.tv_sec - now.tv_sec; + then.tv_nsec = ts.tv_nsec - now.tv_nsec; + if (then.tv_nsec < 0) { + then.tv_nsec += NSEC_PER_SEC; + then.tv_sec--; + } } } - + if (uap->mutex_sem == 0) kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); else kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); - + } else { - + if (uap->mutex_sem == 0) kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return); else - + kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return); } - - if (kern_result == KERN_SUCCESS) + + if (kern_result == KERN_SUCCESS && !truncated_timeout) return(0); + else if (kern_result == KERN_SUCCESS && truncated_timeout) + return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */ + else if (kern_result == KERN_ABORTED) + return(EINTR); + else if (kern_result == KERN_OPERATION_TIMED_OUT) + return(ETIMEDOUT); + else + return(EINVAL); +} +#endif /* OLD_SEMWAIT_SIGNAL*/ + +/* + * Returns: 0 Success + * EINTR + * ETIMEDOUT + * EINVAL + * EFAULT if timespec is NULL + */ +int +__semwait_signal(proc_t p, struct __semwait_signal_args *uap, + int32_t *retval) +{ + __pthread_testcancel(0); + return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval)); +} + +int +__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap, + __unused int32_t *retval) +{ + + kern_return_t kern_result; + mach_timespec_t then; + struct timespec now; + struct user_timespec ts; + boolean_t truncated_timeout = FALSE; + + if(uap->timeout) { + + ts.tv_sec = uap->tv_sec; + ts.tv_nsec = uap->tv_nsec; + + if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) { + ts.tv_sec = 0xFFFFFFFF; + ts.tv_nsec = 0; + truncated_timeout = TRUE; + } + + if (uap->relative) { + then.tv_sec = ts.tv_sec; + then.tv_nsec = ts.tv_nsec; + } else { + nanotime(&now); + + /* if time has elapsed, set time to null timepsec to bailout rightaway */ + if (now.tv_sec == ts.tv_sec ? 
+ now.tv_nsec > ts.tv_nsec : + now.tv_sec > ts.tv_sec) { + then.tv_sec = 0; + then.tv_nsec = 0; + } else { + then.tv_sec = ts.tv_sec - now.tv_sec; + then.tv_nsec = ts.tv_nsec - now.tv_nsec; + if (then.tv_nsec < 0) { + then.tv_nsec += NSEC_PER_SEC; + then.tv_sec--; + } + } + } + + if (uap->mutex_sem == 0) + kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); + else + kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); + + } else { + + if (uap->mutex_sem == 0) + kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return); + else + + kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return); + } + + if (kern_result == KERN_SUCCESS && !truncated_timeout) + return(0); + else if (kern_result == KERN_SUCCESS && truncated_timeout) + return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */ else if (kern_result == KERN_ABORTED) return(EINTR); else if (kern_result == KERN_OPERATION_TIMED_OUT) @@ -944,9 +1190,10 @@ __semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_ar return(EINVAL); } + int __pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap, - __unused register_t *retval) + __unused int32_t *retval) { thread_t target_act; int error = 0; @@ -979,7 +1226,7 @@ out: int __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap, - __unused register_t *retval) + __unused int32_t *retval) { user_addr_t set = uap->set; user_addr_t oset = uap->oset; @@ -1033,14 +1280,14 @@ out: * copyout:EFAULT */ int -__sigwait(proc_t p, struct __sigwait_args *uap, register_t *retval) +__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval) { __pthread_testcancel(1); return(__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval)); } int -__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused register_t *retval) +__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval) { struct uthread *ut; struct uthread *uth; @@ -1103,7 +1350,7 @@ __sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused regis /* No Continuations for now */ error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0); - if ((error == EINTR) || (error == ERESTART)) + if (error == ERESTART) error = 0; sigw = (ut->uu_sigwait & siglist); @@ -1118,6 +1365,11 @@ sigwait1: panic("sigwait with no signal wakeup"); /* Clear the pending signal in the thread it was delivered */ uth->uu_siglist &= ~(sigmask(signum)); + +#if CONFIG_DTRACE + DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo)); +#endif + proc_unlock(p); if (uap->sig != USER_ADDR_NULL) error = copyout(&signum, uap->sig, sizeof(int)); @@ -1129,10 +1381,10 @@ sigwait1: } int -sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused register_t *retval) +sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval) { - struct user_sigaltstack ss; - struct user_sigaltstack *pstk; + struct kern_sigaltstack ss; + struct kern_sigaltstack *pstk; int error; struct uthread *uth; int onstack; @@ -1145,11 +1397,13 @@ sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused register_t onstack = pstk->ss_flags & SA_ONSTACK; if (uap->oss) { if (IS_64BIT_PROCESS(p)) { - error = 
copyout(pstk, uap->oss, sizeof(struct user_sigaltstack)); + struct user64_sigaltstack ss64; + sigaltstack_kern_to_user64(pstk, &ss64); + error = copyout(&ss64, uap->oss, sizeof(ss64)); } else { - struct sigaltstack ss32; - sigaltstack_64to32(pstk, &ss32); - error = copyout(&ss32, uap->oss, sizeof(struct sigaltstack)); + struct user32_sigaltstack ss32; + sigaltstack_kern_to_user32(pstk, &ss32); + error = copyout(&ss32, uap->oss, sizeof(ss32)); } if (error) return (error); @@ -1157,11 +1411,13 @@ sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused register_t if (uap->nss == USER_ADDR_NULL) return (0); if (IS_64BIT_PROCESS(p)) { - error = copyin(uap->nss, &ss, sizeof(struct user_sigaltstack)); + struct user64_sigaltstack ss64; + error = copyin(uap->nss, &ss64, sizeof(ss64)); + sigaltstack_user64_to_kern(&ss64, &ss); } else { - struct sigaltstack ss32; - error = copyin(uap->nss, &ss32, sizeof(struct sigaltstack)); - sigaltstack_32to64(&ss32,&ss); + struct user32_sigaltstack ss32; + error = copyin(uap->nss, &ss32, sizeof(ss32)); + sigaltstack_user32_to_kern(&ss32, &ss); } if (error) return (error); @@ -1189,7 +1445,7 @@ sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused register_t } int -kill(proc_t cp, struct kill_args *uap, __unused register_t *retval) +kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval) { proc_t p; kauth_cred_t uc = kauth_cred_get(); @@ -1270,20 +1526,34 @@ killpg1_callback(proc_t p, void * arg) int signum = kargp->signum; int * nfoundp = kargp->nfoundp; int n; + int zombie = 0; + int error = 0; + if ((kargp->zombie != 0) && ((p->p_listflag & P_LIST_EXITED) == P_LIST_EXITED)) + zombie = 1; - if (cansignal(cp, uc, p, signum, 0) == 0) - return(PROC_RETURNED); + if (zombie != 0) { + proc_list_lock(); + error = cansignal(cp, uc, p, signum, zombie); + proc_list_unlock(); + + if (error != 0 && nfoundp != NULL) { + n = *nfoundp; + *nfoundp = n+1; + } + } else { + if (cansignal(cp, uc, p, signum, 0) == 0) + return(PROC_RETURNED); - if (nfoundp != NULL) { - n = *nfoundp; - *nfoundp = n+1; + if (nfoundp != NULL) { + n = *nfoundp; + *nfoundp = n+1; + } + if (signum != 0) + psignal(p, signum); } - if (signum != 0) - psignal(p, signum); return(PROC_RETURNED); - } /* @@ -1293,7 +1563,6 @@ killpg1_callback(proc_t p, void * arg) int killpg1(proc_t cp, int signum, int pgid, int all, int posix) { - proc_t p; kauth_cred_t uc; struct pgrp *pgrp; int nfound = 0; @@ -1313,24 +1582,9 @@ killpg1(proc_t cp, int signum, int pgid, int all, int posix) karg.uc = uc; karg.nfoundp = &nfound; karg.signum = signum; + karg.zombie = 1; - proc_iterate(PROC_ALLPROCLIST, killpg1_callback, &karg, killpg1_filt, (void *)&kfarg); - /* - * Signalling zombies is a no-op, but they must be counted - * among those processes which have been signalled, since - * they are still members of the process group. 
- */ - - proc_list_lock(); - - for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) { - if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || - (!posix && p == cp) || !cansignal(cp, uc, p, signum, 1)) - continue; - nfound++; - } - - proc_list_unlock(); + proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), killpg1_callback, &karg, killpg1_filt, (void *)&kfarg); } else { if (pgid == 0) { @@ -1350,6 +1604,7 @@ killpg1(proc_t cp, int signum, int pgid, int all, int posix) karg.uc = uc; karg.signum = signum; karg.cp = cp; + karg.zombie = 0; /* PGRP_DROPREF drops the pgrp refernce */ @@ -1385,7 +1640,7 @@ gsignal(int pgid, int signum) static int pgsignal_filt(proc_t p, void * arg) { - int checkctty = (int)arg; + int checkctty = *(int*)arg; if ((checkctty == 0) || p->p_flag & P_CONTROLT) return(1); @@ -1397,7 +1652,7 @@ pgsignal_filt(proc_t p, void * arg) static int pgsignal_callback(proc_t p, void * arg) { - int signum = (int)arg; + int signum = *(int*)arg; psignal(p, signum); return(PROC_RETURNED); @@ -1408,7 +1663,7 @@ void pgsignal(struct pgrp *pgrp, int signum, int checkctty) { if (pgrp != PGRP_NULL) { - pgrp_iterate(pgrp, PGRP_BLOCKITERATE, pgsignal_callback, (void *)signum, pgsignal_filt, (void *)checkctty); + pgrp_iterate(pgrp, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty); } } @@ -1420,7 +1675,7 @@ tty_pgsignal(struct tty *tp, int signum, int checkctty) pg = tty_pgrp(tp); if (pg != PGRP_NULL) { - pgrp_iterate(pg, PGRP_BLOCKITERATE, pgsignal_callback, (void *)signum, pgsignal_filt, (void *)checkctty); + pgrp_iterate(pg, PGRP_BLOCKITERATE, pgsignal_callback, &signum, pgsignal_filt, &checkctty); pg_rele(pg); } } @@ -1445,7 +1700,7 @@ threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code) p = (proc_t)(get_bsdtask_info(sig_task)); uth = get_bsdthread_info(sig_actthread); - if (uth && (uth->uu_flag & UT_VFORK)) + if (uth->uu_flag & UT_VFORK) p = uth->uu_proc; proc_lock(p); @@ -1462,6 +1717,18 @@ threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code) signal_setast(sig_actthread); } +/* + * get_signalthread + * + * Picks an appropriate thread from a process to target with a signal. + * + * Called with proc locked. + * Returns thread with BSD ast set. + * + * We attempt to deliver a proc-wide signal to the first thread in the task. + * This allows single threaded applications which use signals to + * be able to be linked with multithreaded libraries. 
+ */ static kern_return_t get_signalthread(proc_t p, int signum, thread_t * thr) { @@ -1470,7 +1737,7 @@ get_signalthread(proc_t p, int signum, thread_t * thr) thread_t sig_thread; struct task * sig_task = p->task; kern_return_t kret; - + *thr = THREAD_NULL; if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { @@ -1481,20 +1748,17 @@ get_signalthread(proc_t p, int signum, thread_t * thr) return(KERN_SUCCESS); }else return(KERN_FAILURE); - } + } - proc_lock(p); TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { if(((uth->uu_flag & UT_NO_SIGMASK)== 0) && (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) { if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) { *thr = uth->uu_context.vc_thread; - proc_unlock(p); return(KERN_SUCCESS); } } } - proc_unlock(p); if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) { return(KERN_SUCCESS); } @@ -1519,10 +1783,10 @@ static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) { int prop; - sig_t action = NULL; + user_addr_t action = USER_ADDR_NULL; proc_t sig_proc; thread_t sig_thread; - register task_t sig_task; + task_t sig_task; int mask; struct uthread *uth; kern_return_t kret; @@ -1531,7 +1795,8 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) kauth_cred_t my_cred; if ((u_int)signum >= NSIG || signum == 0) - panic("psignal signal number"); + panic("psignal: bad signal number %d", signum); + mask = sigmask(signum); prop = sigprop[signum]; @@ -1541,6 +1806,12 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) } #endif /* SIGNAL_DEBUG */ + /* catch unexpected initproc kills early for easier debuggging */ + if (signum == SIGKILL && p == initproc) + panic_plain("unexpected SIGKILL of %s %s", + (p->p_name[0] != '\0' ? p->p_name : "initproc"), + ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : "")); + /* * We will need the task pointer later. Grab it now to * check for a zombie process. Also don't send signals @@ -1549,26 +1820,34 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) if (flavor & PSIG_VFORK) { sig_task = task; sig_thread = thread; - sig_proc= p; + sig_proc = p; } else if (flavor & PSIG_THREAD) { sig_task = get_threadtask(thread); sig_thread = thread; sig_proc = (proc_t)get_bsdtask_info(sig_task); + } else if (flavor & PSIG_TRY_THREAD) { + assert((thread == current_thread()) && (p == current_proc())); + sig_task = p->task; + sig_thread = thread; + sig_proc = p; } else { sig_task = p->task; + sig_thread = THREAD_NULL; sig_proc = p; - sig_thread = (struct thread *)0; } - if (((sig_task == TASK_NULL) || is_kerneltask(sig_task))) { + + if ((sig_task == TASK_NULL) || is_kerneltask(sig_task)) return; - } /* * do not send signals to the process that has the thread * doing a reboot(). Not doing so will mark that thread aborted - * and can cause IO failures wich will cause data loss. + * and can cause IO failures wich will cause data loss. There's + * also no need to send a signal to a process that is in the middle + * of being torn down. 
*/ - if (ISSET(sig_proc->p_flag, P_REBOOT)) { + if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) { + DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum); return; } @@ -1576,30 +1855,39 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) proc_knote(sig_proc, NOTE_SIGNAL | signum); } - if ((flavor & PSIG_LOCKED)== 0) proc_signalstart(sig_proc, 0); - /* - * Deliver the signal to the first thread in the task. This - * allows single threaded applications which use signals to - * be able to be linked with multithreaded libraries. We have - * an implicit reference to the current thread, but need - * an explicit one otherwise. The thread reference keeps - * the corresponding task data structures around too. This - * reference is released by thread_deallocate. - */ - - + /* Don't send signals to a process that has ignored them. */ if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) { DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum); - goto psigout; + goto sigout_unlocked; } + /* + * The proc_lock prevents the targeted thread from being deallocated + * or handling the signal until we're done signaling it. + * + * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore. + * + * XXX: What if the thread goes inactive after the thread passes bsd ast point? + */ + proc_lock(sig_proc); + if (flavor & PSIG_VFORK) { action = SIG_DFL; act_set_astbsd(sig_thread); kret = KERN_SUCCESS; + } else if (flavor & PSIG_TRY_THREAD) { + uth = get_bsdthread_info(sig_thread); + if (((uth->uu_flag & UT_NO_SIGMASK) == 0) && + (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) && + ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) { + /* deliver to specified thread */ + } else { + /* deliver to any willing thread */ + kret = get_signalthread(sig_proc, signum, &sig_thread); + } } else if (flavor & PSIG_THREAD) { /* If successful return with ast set */ kret = check_actforsig(sig_task, sig_thread, 1); @@ -1607,14 +1895,13 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) /* If successful return with ast set */ kret = get_signalthread(sig_proc, signum, &sig_thread); } + if (kret != KERN_SUCCESS) { -#if SIGNAL_DEBUG - ram_printf(1); -#endif /* SIGNAL_DEBUG */ - goto psigout; + DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum); + proc_unlock(sig_proc); + goto sigout_unlocked; } - uth = get_bsdthread_info(sig_thread); /* @@ -1633,7 +1920,8 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) * action will be SIG_DFL here.) */ if (sig_proc->p_sigignore & mask) - goto psigout; + goto sigout_locked; + if (uth->uu_sigwait & mask) action = KERN_SIG_WAIT; else if (uth->uu_sigmask & mask) @@ -1645,9 +1933,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) } } - - proc_lock(sig_proc); - + /* TODO: p_nice isn't hooked up to the scheduler... */ if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) && (sig_proc->p_lflag & P_LTRACED) == 0) sig_proc->p_nice = NZERO; @@ -1663,41 +1949,33 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) * is default; don't stop the process below if sleeping, * and don't clear any pending SIGCONT. 
*/ - proc_unlock(sig_proc); pg = proc_pgrp(sig_proc); if (prop & SA_TTYSTOP && pg->pg_jobc == 0 && action == SIG_DFL) { pg_rele(pg); - goto psigout; + goto sigout_locked; } pg_rele(pg); - proc_lock(sig_proc); uth->uu_siglist &= ~contsigmask; } uth->uu_siglist |= mask; - /* - * Repost AST incase sigthread has processed - * ast and missed signal post. - */ - if (action == KERN_SIG_CATCH) - act_set_astbsd(sig_thread); - /* * Defer further processing for signals which are held, * except that stopped processes must be continued by SIGCONT. */ /* vfork will not go thru as action is SIG_DFL */ - if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) { - proc_unlock(sig_proc); - goto psigout; - } + if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) + goto sigout_locked; + /* * SIGKILL priority twiddling moved here from above because * it needs sig_thread. Could merge it into large switch * below if we didn't care about priority for tracing * as SIGKILL's action is always SIG_DFL. + * + * TODO: p_nice isn't hooked up to the scheduler... */ if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) { sig_proc->p_nice = NZERO; @@ -1711,29 +1989,38 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) if (sig_proc->p_lflag & P_LTRACED) { if (sig_proc->p_stat != SSTOP) goto runlocked; - else { - proc_unlock(sig_proc); - goto psigout; - } + else + goto sigout_locked; } + if ((flavor & PSIG_VFORK) != 0) goto runlocked; if (action == KERN_SIG_WAIT) { +#if CONFIG_DTRACE + /* + * DTrace proc signal-clear returns a siginfo_t. Collect the needed info. + */ + r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */ + + bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo)); + + uth->t_dtrace_siginfo.si_signo = signum; + uth->t_dtrace_siginfo.si_pid = current_proc()->p_pid; + uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0); + uth->t_dtrace_siginfo.si_uid = r_uid; + uth->t_dtrace_siginfo.si_code = 0; +#endif uth->uu_sigwait = mask; uth->uu_siglist &= ~mask; wakeup(&uth->uu_sigwait); /* if it is SIGCONT resume whole process */ if (prop & SA_CONT) { - OSBitOrAtomic(P_CONTINUED, (UInt32 *)&sig_proc->p_flag); + OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag); sig_proc->p_contproc = current_proc()->p_pid; - - proc_unlock(sig_proc); - (void) task_resume(sig_task); - goto psigout; + (void) task_resume_internal(sig_task); } - proc_unlock(sig_proc); - goto psigout; + goto sigout_locked; } if (action != SIG_DFL) { @@ -1743,14 +2030,11 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) * (except for SIGCONT). */ if (prop & SA_CONT) { - OSBitOrAtomic(P_CONTINUED, (UInt32 *)&sig_proc->p_flag); - proc_unlock(sig_proc); - (void) task_resume(sig_task); - proc_lock(sig_proc); + OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag); + (void) task_resume_internal(sig_task); sig_proc->p_stat = SRUN; } else if (sig_proc->p_stat == SSTOP) { - proc_unlock(sig_proc); - goto psigout; + goto sigout_locked; } /* * Fill out siginfo structure information to pass to the @@ -1765,9 +2049,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) * Note: Avoid the SIGCHLD recursion case! 
*/ if (signum != SIGCHLD) { - proc_unlock(sig_proc); r_uid = kauth_getruid(); - proc_lock(sig_proc); sig_proc->si_pid = current_proc()->p_pid; sig_proc->si_status = W_EXITCODE(signum, 0); @@ -1787,14 +2069,13 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) * stopped from the keyboard. */ if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) { - proc_unlock(sig_proc); - psignal_locked(sig_proc, SIGKILL); - proc_lock(sig_proc); uth->uu_siglist &= ~mask; proc_unlock(sig_proc); - goto psigout; + /* siglock still locked, proc_lock not locked */ + psignal_locked(sig_proc, SIGKILL); + goto sigout_unlocked; } - + /* * Stop the task * if task hasn't already been stopped by @@ -1804,7 +2085,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) if (sig_proc->p_stat != SSTOP) { sig_proc->p_xstat = signum; sig_proc->p_stat = SSTOP; - OSBitAndAtomic(~((uint32_t)P_CONTINUED), (UInt32 *)&sig_proc->p_flag); + OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag); sig_proc->p_lflag &= ~P_LWAITED; proc_unlock(sig_proc); @@ -1813,7 +2094,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) if (( pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) { my_cred = kauth_cred_proc_ref(sig_proc); - r_uid = my_cred->cr_ruid; + r_uid = kauth_cred_getruid(my_cred); kauth_cred_unref(&my_cred); proc_lock(sig_proc); @@ -1833,19 +2114,18 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) psignal(pp, SIGCHLD); } - if (pp != PROC_NULL) + if (pp != PROC_NULL) { proc_parentdropref(pp, 0); - } else - proc_unlock(sig_proc); - goto psigout; + } + + goto sigout_unlocked; + } + + goto sigout_locked; } DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum); - /* - * enters switch with sig_proc lock held but dropped when - * gets out of switch - */ switch (signum) { /* * Signals ignored by default have been dealt @@ -1862,22 +2142,28 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) * Process will be running after 'run' */ sig_proc->p_stat = SRUN; - proc_unlock(sig_proc); + /* + * In scenarios where suspend/resume are racing + * the signal we are missing AST_BSD by the time + * we get here, set again to avoid races. This + * was the scenario with spindump enabled shutdowns. + * We would need to cover this approp down the line. + */ + act_set_astbsd(sig_thread); thread_abort(sig_thread); - goto psigout; + goto sigout_locked; case SIGCONT: /* * Let the process run. If it's sleeping on an * event, it remains so. 
*/ - OSBitOrAtomic(P_CONTINUED, (UInt32 *)&sig_proc->p_flag); + OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag); sig_proc->p_contproc = sig_proc->p_pid; - proc_unlock(sig_proc); - (void) task_resume(sig_task); - proc_lock(sig_proc); + (void) task_resume_internal(sig_task); + /* * When processing a SIGCONT, we need to check * to see if there are signals pending that @@ -1896,8 +2182,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) uth->uu_siglist &= ~mask; sig_proc->p_stat = SRUN; - proc_unlock(sig_proc); - goto psigout; + goto sigout_locked; default: /* @@ -1907,9 +2192,8 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) */ if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) { sig_proc->p_stat = SRUN; - proc_unlock(sig_proc); thread_abort(sig_thread); - goto psigout; + goto sigout_locked; } /* @@ -1917,8 +2201,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum) * resume it. */ if (sig_proc->p_stat == SSTOP) { - proc_unlock(sig_proc); - goto psigout; + goto sigout_locked; } goto runlocked; } @@ -1932,22 +2215,25 @@ runlocked: */ if (sig_proc->p_stat == SSTOP) { if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) - uth->uu_siglist |= sigmask(sig_proc->p_xstat); + uth->uu_siglist |= sigmask(sig_proc->p_xstat); + if ((flavor & PSIG_VFORK) != 0) { sig_proc->p_stat = SRUN; } - proc_unlock(sig_proc); } else { /* * setrunnable(p) in BSD and * Wake up the thread if it is interruptible. */ sig_proc->p_stat = SRUN; - proc_unlock(sig_proc); if ((flavor & PSIG_VFORK) == 0) thread_abort_safely(sig_thread); } -psigout: + +sigout_locked: + proc_unlock(sig_proc); + +sigout_unlocked: if ((flavor & PSIG_LOCKED)== 0) { proc_signalend(sig_proc, 0); } @@ -1977,6 +2263,12 @@ psignal_uthread(thread_t thread, int signum) psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum); } +/* same as psignal(), but prefer delivery to 'thread' if possible */ +static void +psignal_try_thread(proc_t p, thread_t thread, int signum) +{ + psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum); +} /* * If the current process has received a signal (should be caught or cause @@ -1991,7 +2283,7 @@ psignal_uthread(thread_t thread, int signum) * postsig(signum); */ int -issignal(proc_t p) +issignal_locked(proc_t p) { int signum, mask, prop, sigbits; thread_t cur_act; @@ -2008,13 +2300,11 @@ issignal(proc_t p) ram_printf(3); } #endif /* SIGNAL_DEBUG */ - proc_lock(p); /* * Try to grab the signal lock. */ if (sig_try_locked(p) <= 0) { - proc_unlock(p); return(0); } @@ -2056,7 +2346,7 @@ issignal(proc_t p) p->sigwait = TRUE; p->sigwait_thread = cur_act; p->p_stat = SSTOP; - OSBitAndAtomic(~((uint32_t)P_CONTINUED), (UInt32 *)&p->p_flag); + OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag); p->p_lflag &= ~P_LWAITED; ut->uu_siglist &= ~mask; /* clear the old signal */ proc_signalend(p, 1); @@ -2067,7 +2357,7 @@ issignal(proc_t p) } else { proc_unlock(p); my_cred = kauth_cred_proc_ref(p); - r_uid = my_cred->cr_ruid; + r_uid = kauth_cred_getruid(my_cred); kauth_cred_unref(&my_cred); pp = proc_parentholdref(p); @@ -2085,18 +2375,15 @@ issignal(proc_t p) /* * XXX Have to really stop for debuggers; * XXX stop() doesn't do the right thing. - * XXX Inline the task_suspend because we - * XXX have to diddle Unix state in the - * XXX middle of it. 
*/ task = p->task; - task_suspend(task); + task_suspend_internal(task); proc_lock(p); p->sigwait = TRUE; p->sigwait_thread = cur_act; p->p_stat = SSTOP; - OSBitAndAtomic(~((uint32_t)P_CONTINUED), (UInt32 *)&p->p_flag); + OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag); p->p_lflag &= ~P_LWAITED; ut->uu_siglist &= ~mask; /* clear the old signal */ @@ -2144,7 +2431,10 @@ issignal(proc_t p) */ proc_signalend(p, 1); proc_unlock(p); + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE, + p->p_pid, W_EXITCODE(0, SIGKILL), 2, 0, 0); exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL); + proc_lock(p); return(0); } @@ -2182,21 +2472,6 @@ issignal(proc_t p) switch ((long)p->p_sigacts->ps_sigact[signum]) { case (long)SIG_DFL: - /* - * Don't take default actions on system processes. - */ - if (p->p_ppid == 0) { -#if DIAGNOSTIC - /* - * Are you sure you want to ignore SIGSEGV - * in init? XXX - */ - printf("Process (pid %d) got signal %d\n", - p->p_pid, signum); -#endif - break; /* == ignore */ - } - /* * If there is a pending stop signal to process * with default action, stop here, @@ -2229,7 +2504,7 @@ issignal(proc_t p) stop(p, pp); if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) { my_cred = kauth_cred_proc_ref(p); - r_uid = my_cred->cr_ruid; + r_uid = kauth_cred_getruid(my_cred); kauth_cred_unref(&my_cred); proc_lock(pp); @@ -2285,8 +2560,7 @@ issignal(proc_t p) } /* NOTREACHED */ out: - proc_signalend(p,1); - proc_unlock(p); + proc_signalend(p, 1); return(retval); } @@ -2322,6 +2596,7 @@ CURSIG(proc_t p) signum = ffs((long)sigbits); mask = sigmask(signum); prop = sigprop[signum]; + sigbits &= ~mask; /* take the signal out */ /* * We should see pending but ignored signals @@ -2330,14 +2605,8 @@ CURSIG(proc_t p) if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) { continue; } + if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) { - /* - * Put the new signal into p_siglist. If the - * signal is being masked, look for other signals. - */ - mask = sigmask(signum); - if (ut->uu_sigmask & mask) - continue; return(signum); } @@ -2350,21 +2619,6 @@ CURSIG(proc_t p) switch ((long)p->p_sigacts->ps_sigact[signum]) { case (long)SIG_DFL: - /* - * Don't take default actions on system processes. - */ - if (p->p_ppid == 0) { -#if DIAGNOSTIC - /* - * Are you sure you want to ignore SIGSEGV - * in init? XXX - */ - printf("Process (pid %d) got signal %d\n", - p->p_pid, signum); -#endif - break; /* == ignore */ - } - /* * If there is a pending stop signal to process * with default action, stop here, @@ -2415,7 +2669,6 @@ CURSIG(proc_t p) */ return (signum); } - sigbits &= ~mask; /* take the signal! */ } /* NOTREACHED */ } @@ -2428,13 +2681,13 @@ CURSIG(proc_t p) static void stop(proc_t p, proc_t parent) { - OSBitAndAtomic(~((uint32_t)P_CONTINUED), (UInt32 *)&p->p_flag); + OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag); if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) { proc_list_lock(); wakeup((caddr_t)parent); proc_list_unlock(); } - (void) task_suspend(p->task); /*XXX*/ + (void) task_suspend_internal(p->task); } /* @@ -2442,12 +2695,12 @@ stop(proc_t p, proc_t parent) * from the current set of pending signals. 
*/ void -postsig(int signum) +postsig_locked(int signum) { proc_t p = current_proc(); struct sigacts *ps = p->p_sigacts; user_addr_t catcher; - u_long code; + uint32_t code; int mask, returnmask; struct uthread * ut; @@ -2461,12 +2714,10 @@ postsig(int signum) panic("psig not on master"); #endif - proc_lock(p); /* * Try to grab the signal lock. */ if (sig_try_locked(p) <= 0) { - proc_unlock(p); return; } @@ -2481,31 +2732,45 @@ postsig(int signum) * Default catcher, where the default is to kill * the process. (Other cases were ignored above.) */ - siginfo_t sinfo; - bzero((caddr_t)&sinfo, sizeof(siginfo_t)); - sig_lock_to_exit(p); p->p_acflag |= AXSIG; if (sigprop[signum] & SA_CORE) { p->p_sigacts->ps_sig = signum; proc_signalend(p, 1); proc_unlock(p); - if (coredump(p) == 0) + if (coredump(p, 0, 0) == 0) signum |= WCOREFLAG; } else { proc_signalend(p, 1); proc_unlock(p); } - sinfo.si_signo = signum; - sinfo.si_pid = p->si_pid; - sinfo.si_uid = p->si_uid; - sinfo.si_status = WEXITSTATUS(p->si_status); +#if CONFIG_DTRACE + bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); + + ut->t_dtrace_siginfo.si_signo = signum; + ut->t_dtrace_siginfo.si_pid = p->si_pid; + ut->t_dtrace_siginfo.si_uid = p->si_uid; + ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status); + + /* Fire DTrace proc:::fault probe when signal is generated by hardware. */ + switch (signum) { + case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP: + DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo)); + break; + default: + break; + } + - DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &sinfo, + DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo), void (*)(void), SIG_DFL); +#endif + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE, + p->p_pid, W_EXITCODE(0, signum), 3, 0, 0); exit1(p, W_EXITCODE(0, signum), (int *)NULL); + proc_lock(p); return; } else { /* @@ -2542,12 +2807,6 @@ postsig(int signum) ps->ps_siginfo &= ~mask; ps->ps_signodefer &= ~mask; } -#ifdef __ppc__ - /* Needs to disable to run in user mode */ - if (signum == SIGFPE) { - thread_enable_fpe(current_thread(), 0); - } -#endif /* __ppc__ */ if (ps->ps_sig != signum) { code = 0; @@ -2555,11 +2814,10 @@ postsig(int signum) code = ps->ps_code; ps->ps_code = 0; } - OSIncrementAtomic(&p->p_stats->p_ru.ru_nsignals); + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals); sendsig(p, catcher, signum, returnmask, code); } proc_signalend(p, 1); - proc_unlock(p); } /* @@ -2628,6 +2886,29 @@ filt_signal(struct knote *kn, long hint) return (kn->kn_data != 0); } +static void +filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev, long type) +{ + proc_klist_lock(); + switch (type) { + case EVENT_REGISTER: + kn->kn_sfflags = kev->fflags; + kn->kn_sdata = kev->data; + break; + case EVENT_PROCESS: + *kev = kn->kn_kevent; + if (kn->kn_flags & EV_CLEAR) { + kn->kn_data = 0; + kn->kn_fflags = 0; + } + break; + default: + panic("filt_signaltouch() - invalid type (%ld)", type); + break; + } + proc_klist_unlock(); +} + void bsd_ast(thread_t thread) { @@ -2643,7 +2924,7 @@ bsd_ast(thread_t thread) if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) { pc = get_useraddr(); addupc_task(p, pc, 1); - OSBitAndAtomic(~((uint32_t)P_OWEUPC), (UInt32 *)&p->p_flag); + OSBitAndAtomic(~((uint32_t)P_OWEUPC), &p->p_flag); } if (timerisset(&p->p_vtimer_user.it_value)) { @@ -2657,8 +2938,8 @@ bsd_ast(thread_t thread) else task_vtimer_clear(p->task, 
TASK_VTIMER_USER); - psignal(p, SIGVTALRM); - } + psignal_try_thread(p, thread, SIGVTALRM); + } } if (timerisset(&p->p_vtimer_prof.it_value)) { @@ -2672,9 +2953,9 @@ bsd_ast(thread_t thread) else task_vtimer_clear(p->task, TASK_VTIMER_PROF); - psignal(p, SIGPROF); + psignal_try_thread(p, thread, SIGPROF); } -} + } if (timerisset(&p->p_rlim_cpu)) { struct timeval tv; @@ -2693,14 +2974,51 @@ bsd_ast(thread_t thread) task_vtimer_clear(p->task, TASK_VTIMER_RLIM); - psignal(p, SIGXCPU); + psignal_try_thread(p, thread, SIGXCPU); + } + } + +#if CONFIG_DTRACE + if (ut->t_dtrace_sig) { + uint8_t dt_action_sig = ut->t_dtrace_sig; + ut->t_dtrace_sig = 0; + psignal(p, dt_action_sig); + } + + if (ut->t_dtrace_stop) { + ut->t_dtrace_stop = 0; + proc_lock(p); + p->p_dtrace_stop = 1; + proc_unlock(p); + (void)task_suspend_internal(p->task); + } + + if (ut->t_dtrace_resumepid) { + proc_t resumeproc = proc_find(ut->t_dtrace_resumepid); + ut->t_dtrace_resumepid = 0; + if (resumeproc != PROC_NULL) { + proc_lock(resumeproc); + /* We only act on processes stopped by dtrace */ + if (resumeproc->p_dtrace_stop) { + resumeproc->p_dtrace_stop = 0; + proc_unlock(resumeproc); + task_resume_internal(resumeproc->task); + } + else { + proc_unlock(resumeproc); + } + proc_rele(resumeproc); } } + +#endif /* CONFIG_DTRACE */ + proc_lock(p); if (CHECK_SIGNALS(p, current_thread(), ut)) { - while ( (signum = issignal(p)) ) - postsig(signum); + while ( (signum = issignal_locked(p)) ) + postsig_locked(signum); } + proc_unlock(p); if (!bsd_init_done) { bsd_init_done = 1; @@ -2812,79 +3130,41 @@ pgsigio(pid_t pgid, int sig) proc_rele(p); } - void proc_signalstart(proc_t p, int locked) { - if (locked == 0) + if (!locked) proc_lock(p); - while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) { - p->p_lflag |= P_LSIGNALWAIT; + + if(p->p_signalholder == current_thread()) + panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock"); + + p->p_sigwaitcnt++; + while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL); - } + p->p_sigwaitcnt--; + p->p_lflag |= P_LINSIGNAL; -#if DIAGNOSTIC -#if SIGNAL_DEBUG -#ifdef __ppc__ - { - int sp, *fp, numsaved; - - __asm__ volatile("mr %0,r1" : "=r" (sp)); - - fp = (int *)*((int *)sp); - for (numsaved = 0; numsaved < 3; numsaved++) { - p->lockpc[numsaved] = fp[2]; - if ((int)fp <= 0) - break; - fp = (int *)*fp; - } - } -#endif /* __ppc__ */ -#endif /* SIGNAL_DEBUG */ -#endif /* DIAGNOSTIC */ p->p_signalholder = current_thread(); - if (locked == 0) + if (!locked) proc_unlock(p); - } void proc_signalend(proc_t p, int locked) { - if (locked == 0) + if (!locked) proc_lock(p); p->p_lflag &= ~P_LINSIGNAL; -#if DIAGNOSTIC -#if SIGNAL_DEBUG -#ifdef __ppc__ - { - int sp, *fp, numsaved; - - __asm__ volatile("mr %0,r1" : "=r" (sp)); - - fp = (int *)*((int *)sp); - for (numsaved = 0; numsaved < 3; numsaved++) { - p->unlockpc[numsaved] = fp[2]; - if ((int)fp <= 0) - break; - fp = (int *)*fp; - } - } -#endif /* __ppc__ */ -#endif /* SIGNAL_DEBUG */ -#endif /* DIAGNOSTIC */ - - if ((p->p_lflag & P_LSIGNALWAIT) == P_LSIGNALWAIT) { - p->p_lflag &= ~P_LSIGNALWAIT; + if (p->p_sigwaitcnt > 0) wakeup(&p->p_sigmask); - } + p->p_signalholder = NULL; - if (locked == 0) + if (!locked) proc_unlock(p); } - void sig_lock_to_exit(proc_t p) { @@ -2892,7 +3172,10 @@ sig_lock_to_exit(proc_t p) p->exit_thread = self; proc_unlock(p); - (void) task_suspend(p->task); + + task_hold(p->task); + task_wait(p->task, FALSE); + proc_lock(p); }
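
Much of this diff replaces the old user_sigaltstack/user_sigaction handling with explicit wire formats: copyin()/copyout() now move user32_* or user64_* structures chosen by IS_64BIT_PROCESS(p), and helpers such as sigaltstack_user32_to_kern() widen them into kernel-native kern_* forms so the rest of kern_sig.c stays ABI-agnostic. Below is a minimal standalone sketch of that copyin-and-widen pattern; the types and names are simplified stand-ins, not the real XNU definitions.

/*
 * Sketch of the user32/user64 -> kern widening pattern used by
 * sigaltstack()/sigaction() in this file.  Stand-in types only.
 */
#include <stdint.h>
#include <string.h>

typedef uint64_t user_addr_t;   /* kernel-native view of a user pointer */
typedef uint64_t user_size_t;

struct user32_stack { uint32_t ss_sp; uint32_t ss_size; int32_t ss_flags; };
struct user64_stack { uint64_t ss_sp; uint64_t ss_size; int32_t ss_flags; };
struct kern_stack   { user_addr_t ss_sp; user_size_t ss_size; int32_t ss_flags; };

/* stand-in for copyin(); the kernel version validates the user address */
static int
copyin_stub(const void *uaddr, void *kaddr, size_t len)
{
	memcpy(kaddr, uaddr, len);
	return 0;
}

/*
 * Copy in the layout matching the caller's ABI, then widen it into the
 * kernel-native form so everything downstream is ABI-agnostic.
 */
static int
copyin_stack(int is_64bit, const void *uaddr, struct kern_stack *out)
{
	int error;

	if (is_64bit) {
		struct user64_stack s64;
		if ((error = copyin_stub(uaddr, &s64, sizeof(s64))) != 0)
			return error;
		out->ss_sp    = s64.ss_sp;
		out->ss_size  = s64.ss_size;
		out->ss_flags = s64.ss_flags;
	} else {
		struct user32_stack s32;
		if ((error = copyin_stub(uaddr, &s32, sizeof(s32))) != 0)
			return error;
		out->ss_sp    = (user_addr_t)s32.ss_sp;  /* zero-extend 32-bit pointer */
		out->ss_size  = s32.ss_size;
		out->ss_flags = s32.ss_flags;
	}
	return 0;
}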
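
pgsignal() and tty_pgsignal() previously squeezed signum and checkctty through pgrp_iterate()'s void * argument by casting the integer value itself to a pointer; the patch passes the address of a local int instead, and pgsignal_filt()/pgsignal_callback() dereference it. A small standalone illustration of that pointer-to-local style, with hypothetical names:

#include <stdio.h>

typedef int (*iter_callback_t)(void *arg);

/* stand-in for pgrp_iterate(): invokes the callback once per member */
static void
iterate(iter_callback_t cb, void *arg)
{
	cb(arg);
}

static int
deliver(void *arg)
{
	int signum = *(int *)arg;        /* read the value back through the pointer */
	printf("would deliver signal %d\n", signum);
	return 0;
}

int
main(void)
{
	int signum = 15;                 /* e.g. SIGTERM */

	/*
	 * Pass &signum rather than (void *)signum: the value survives intact
	 * on LP64 and avoids implementation-defined int/pointer casts.
	 */
	iterate(deliver, &signum);
	return 0;
}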
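
__semwait_signal() and __old_semwait_signal() now clamp timeouts whose tv_sec needs more than 32 bits, since the Mach timespec handed to the semaphore traps only carries 32-bit seconds, and a wait that succeeds after such truncation is reported as EINTR rather than a normal return. A standalone sketch of the clamp and of the absolute-to-relative conversion with the nanosecond borrow, again using simplified stand-in types:

#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000L

struct u_timespec { int64_t tv_sec; int64_t tv_nsec; };   /* user-supplied value */
struct m_timespec { uint32_t tv_sec; int32_t tv_nsec; };  /* Mach-style 32-bit seconds */

/*
 * Clamp a timeout to what 32-bit seconds can express.  Returns true when
 * truncation happened, so the caller can report EINTR instead of claiming
 * the full (longer) timeout elapsed.
 */
static bool
clamp_timeout(struct u_timespec *ts)
{
	if (((uint64_t)ts->tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
		ts->tv_sec  = 0xFFFFFFFF;
		ts->tv_nsec = 0;
		return true;
	}
	return false;
}

/*
 * Turn an absolute deadline into a relative Mach timeout.  An already
 * expired deadline becomes a zero timeout; otherwise subtract and borrow
 * a second when the nanosecond difference goes negative.
 */
static struct m_timespec
deadline_to_relative(struct u_timespec deadline, struct u_timespec now)
{
	struct m_timespec rel = { 0, 0 };

	if (now.tv_sec == deadline.tv_sec ? now.tv_nsec > deadline.tv_nsec
	                                  : now.tv_sec > deadline.tv_sec)
		return rel;                       /* already expired: zero timeout */

	rel.tv_sec  = (uint32_t)(deadline.tv_sec - now.tv_sec);
	rel.tv_nsec = (int32_t)(deadline.tv_nsec - now.tv_nsec);
	if (rel.tv_nsec < 0) {
		rel.tv_nsec += NSEC_PER_SEC;
		rel.tv_sec--;
	}
	return rel;
}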