+ thread = current_thread();
+ uth = (struct uthread *)get_bsdthread_info(thread);
+
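+ /* action 1 re-enables cancellation, action 2 disables it; 0 (or anything else) acknowledges a pending cancel */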
+ switch (action) {
+ case 1:
+ uth->uu_flag &= ~UT_CANCELDISABLE;
+ return(0);
+ case 2:
+ uth->uu_flag |= UT_CANCELDISABLE;
+ return(0);
+ case 0:
+ default:
+ /* acknowledge only if a cancel is pending and cancellation is neither disabled nor already acknowledged */
+ if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
+ uth->uu_flag &= ~UT_CANCEL;
+ uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
+ return(0);
+ }
+ return(EINVAL);
+ }
+ return(EINVAL);
+}
+
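+/*
+ * Continuation passed to the semaphore wait traps below: maps the Mach
+ * result to an errno and completes the syscall via unix_syscall_return().
+ */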
+void
+__posix_sem_syscall_return(kern_return_t kern_result)
+{
+ int error = 0;
+
+ if (kern_result == KERN_SUCCESS)
+ error = 0;
+ else if (kern_result == KERN_ABORTED)
+ error = EINTR;
+ else if (kern_result == KERN_OPERATION_TIMED_OUT)
+ error = ETIMEDOUT;
+ else
+ error = EINVAL;
+ unix_syscall_return(error);
+ /* does not return */
+}
+
+#if OLD_SEMWAIT_SIGNAL
+/*
+ * Returns: 0 Success
+ * EINTR
+ * ETIMEDOUT
+ * EINVAL
+ * EFAULT if timespec is NULL
+ */
+int
+__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
+ int32_t *retval)
+{
+ __pthread_testcancel(0);
+ return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));
+}
+
+int
+__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
+ __unused int32_t *retval)
+{
+
+ kern_return_t kern_result;
+ int error;
+ mach_timespec_t then;
+ struct timespec now;
+ struct user_timespec ts;
+ boolean_t truncated_timeout = FALSE;
+
+ if (uap->timeout) {
+
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_timespec ts64;
+ error = copyin(uap->ts, &ts64, sizeof(ts64));
+ ts.tv_sec = ts64.tv_sec;
+ ts.tv_nsec = ts64.tv_nsec;
+ } else {
+ struct user32_timespec ts32;
+ error = copyin(uap->ts, &ts32, sizeof(ts32));
+ ts.tv_sec = ts32.tv_sec;
+ ts.tv_nsec = ts32.tv_nsec;
+ }
+
+ if (error) {
+ return error;
+ }
+
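+ /* mach_timespec_t holds only 32-bit seconds; clamp longer timeouts and remember the truncation */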
+ if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
+ ts.tv_sec = 0xFFFFFFFF;
+ ts.tv_nsec = 0;
+ truncated_timeout = TRUE;
+ }
+
+ if (uap->relative) {
+ then.tv_sec = ts.tv_sec;
+ then.tv_nsec = ts.tv_nsec;
+ } else {
+ nanotime(&now);
+
+ /* if the deadline has already passed, use a zero timespec to bail out right away */
+ if (now.tv_sec == ts.tv_sec ?
+ now.tv_nsec > ts.tv_nsec :
+ now.tv_sec > ts.tv_sec) {
+ then.tv_sec = 0;
+ then.tv_nsec = 0;
+ } else {
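+ /* deadline is still in the future: convert it to an interval relative to now */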
+ then.tv_sec = ts.tv_sec - now.tv_sec;
+ then.tv_nsec = ts.tv_nsec - now.tv_nsec;
+ if (then.tv_nsec < 0) {
+ then.tv_nsec += NSEC_PER_SEC;
+ then.tv_sec--;
+ }
+ }
+ }
+
+ if (uap->mutex_sem == 0)
+ kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
+ else
+ kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
+
+ } else {
+
+ if (uap->mutex_sem == 0)
+ kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
+ else
+ kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
+ }
+
+ if (kern_result == KERN_SUCCESS && !truncated_timeout)
+ return(0);
+ else if (kern_result == KERN_SUCCESS && truncated_timeout)
+ return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
+ else if (kern_result == KERN_ABORTED)
+ return(EINTR);
+ else if (kern_result == KERN_OPERATION_TIMED_OUT)
+ return(ETIMEDOUT);
+ else
+ return(EINVAL);
+}
+#endif /* OLD_SEMWAIT_SIGNAL */
+
+/*
+ * Returns: 0 Success
+ * EINTR
+ * ETIMEDOUT
+ * EINVAL
+ * EFAULT if timespec is NULL
+ */
+int
+__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
+ int32_t *retval)
+{
+ __pthread_testcancel(0);
+ return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));
+}
+
+int
+__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
+ __unused int32_t *retval)
+{
+
+ kern_return_t kern_result;
+ mach_timespec_t then;
+ struct timespec now;
+ struct user_timespec ts;
+ boolean_t truncated_timeout = FALSE;
+
+ if (uap->timeout) {
+
+ ts.tv_sec = uap->tv_sec;
+ ts.tv_nsec = uap->tv_nsec;
+
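+ /* clamp timeouts whose seconds do not fit in mach_timespec_t's 32-bit field */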
+ if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
+ ts.tv_sec = 0xFFFFFFFF;
+ ts.tv_nsec = 0;
+ truncated_timeout = TRUE;
+ }
+
+ if (uap->relative) {
+ then.tv_sec = ts.tv_sec;
+ then.tv_nsec = ts.tv_nsec;
+ } else {
+ nanotime(&now);
+
+ /* if the deadline has already passed, use a zero timespec to bail out right away */
+ if (now.tv_sec == ts.tv_sec ?
+ now.tv_nsec > ts.tv_nsec :
+ now.tv_sec > ts.tv_sec) {
+ then.tv_sec = 0;
+ then.tv_nsec = 0;
+ } else {
+ then.tv_sec = ts.tv_sec - now.tv_sec;
+ then.tv_nsec = ts.tv_nsec - now.tv_nsec;
+ if (then.tv_nsec < 0) {
+ then.tv_nsec += NSEC_PER_SEC;
+ then.tv_sec--;
+ }
+ }
+ }
+
+ if (uap->mutex_sem == 0)
+ kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
+ else
+ kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
+
+ } else {
+
+ if (uap->mutex_sem == 0)
+ kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
+ else
+ kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
+ }
+
+ if (kern_result == KERN_SUCCESS && !truncated_timeout)
+ return(0);
+ else if (kern_result == KERN_SUCCESS && truncated_timeout)
+ return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
+ else if (kern_result == KERN_ABORTED)
+ return(EINTR);
+ else if (kern_result == KERN_OPERATION_TIMED_OUT)
+ return(ETIMEDOUT);
+ else
+ return(EINVAL);
+}
+
+
+int
+__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
+ __unused int32_t *retval)
+{
+ thread_t target_act;
+ int error = 0;
+ int signum = uap->sig;
+ struct uthread *uth;
+
+ target_act = (thread_t)port_name_to_thread(uap->thread_port);
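+ /* port_name_to_thread() returns a referenced thread; the reference is dropped at "out" */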
+
+ if (target_act == THREAD_NULL)
+ return (ESRCH);
+ if ((u_int)signum >= NSIG) {
+ error = EINVAL;
+ goto out;
+ }
+
+ uth = (struct uthread *)get_bsdthread_info(target_act);
+
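+ /* a thread marked UT_NO_SIGMASK does not accept signals */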
+ if (uth->uu_flag & UT_NO_SIGMASK) {
+ error = ESRCH;
+ goto out;
+ }
+
+ if (signum)
+ psignal_uthread(target_act, signum);
+out:
+ thread_deallocate(target_act);
+ return (error);
+}
+
+
+int
+__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
+ __unused int32_t *retval)
+{
+ user_addr_t set = uap->set;
+ user_addr_t oset = uap->oset;
+ sigset_t nset;
+ int error = 0;
+ struct uthread *ut;
+ sigset_t oldset;
+
+ ut = (struct uthread *)get_bsdthread_info(current_thread());
+ oldset = ut->uu_sigmask;
+
+ if (set == USER_ADDR_NULL) {
+ /* need only old mask */
+ goto out;
+ }
+
+ error = copyin(set, &nset, sizeof(sigset_t));
+ if (error)
+ goto out;
+
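+ /* signals in sigcantmask (SIGKILL, SIGSTOP) can never be blocked */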
+ switch (uap->how) {
+ case SIG_BLOCK:
+ ut->uu_sigmask |= (nset & ~sigcantmask);
+ break;
+
+ case SIG_UNBLOCK:
+ ut->uu_sigmask &= ~(nset);
+ signal_setast(current_thread());
+ break;
+
+ case SIG_SETMASK:
+ ut->uu_sigmask = (nset & ~sigcantmask);
+ signal_setast(current_thread());
+ break;
+
+ default:
+ error = EINVAL;
+
+ }
+out:
+ if (!error && oset != USER_ADDR_NULL)
+ copyout(&oldset, oset, sizeof(sigset_t));
+
+ return(error);
+}
+
+/*
+ * Returns: 0 Success
+ * EINVAL
+ * copyin:EFAULT
+ * copyout:EFAULT
+ */
+int
+__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
+{
+ __pthread_testcancel(1);
+ return(__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval));
+}
+
+int
+__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
+{
+ struct uthread *ut;
+ struct uthread *uth;
+ int error = 0;
+ sigset_t mask;
+ sigset_t siglist;
+ sigset_t sigw = 0;
+ int signum;
+
+ ut = (struct uthread *)get_bsdthread_info(current_thread());
+
+ if (uap->set == USER_ADDR_NULL)
+ return(EINVAL);
+
+ error = copyin(uap->set, &mask, sizeof(sigset_t));
+ if (error)
+ return(error);
+
+ siglist = (mask & ~sigcantmask);
+
+ if (siglist == 0)
+ return(EINVAL);
+
+ proc_lock(p);
+ if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) {
+ proc_unlock(p);
+ return(EINVAL);
+ } else {
+ proc_signalstart(p, 1);
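+ /* see whether one of the requested signals is already pending on any thread */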
+ TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
+ if ( (sigw = uth->uu_siglist & siglist) ) {
+ break;
+ }
+ }
+ proc_signalend(p, 1);
+ }
+
+ if (sigw) {
+ /* The signal was pending on a thread */
+ goto sigwait1;
+ }
+ /*
+ * When returning from sigwait, we want
+ * the old mask to be restored after the
+ * signal handler has finished. Thus, we
+ * save it here and mark the sigacts structure
+ * to indicate this.
+ */
+ uth = ut; /* wait for it to be delivered to us */
+ ut->uu_oldmask = ut->uu_sigmask;
+ ut->uu_flag |= UT_SAS_OLDMASK;
+ if (siglist == (sigset_t)0) {
+ proc_unlock(p);
+ return(EINVAL);
+ }
+ /* SIGKILL and SIGSTOP are not maskable either */
+ ut->uu_sigmask = ~(siglist|sigcantmask);
+ ut->uu_sigwait = siglist;
+
+ /* No Continuations for now */
+ error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0);
+
+ if (error == ERESTART)
+ error = 0;
+
+ sigw = (ut->uu_sigwait & siglist);
+ ut->uu_sigmask = ut->uu_oldmask;
+ ut->uu_oldmask = 0;
+ ut->uu_flag &= ~UT_SAS_OLDMASK;
+sigwait1:
+ ut->uu_sigwait = 0;
+ if (!error) {
+ signum = ffs((unsigned int)sigw);
+ if (!signum)
+ panic("sigwait with no signal wakeup");
+ /* Clear the pending signal in the thread it was delivered to */
+ uth->uu_siglist &= ~(sigmask(signum));
+
+#if CONFIG_DTRACE
+ DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
+#endif
+
+ proc_unlock(p);
+ if (uap->sig != USER_ADDR_NULL)
+ error = copyout(&signum, uap->sig, sizeof(int));
+ } else
+ proc_unlock(p);
+
+ return(error);
+
+}
+
+int
+sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
+{
+ struct kern_sigaltstack ss;
+ struct kern_sigaltstack *pstk;
+ int error;
+ struct uthread *uth;
+ int onstack;
+
+ uth = (struct uthread *)get_bsdthread_info(current_thread());
+
+ pstk = &uth->uu_sigstk;
+ if ((uth->uu_flag & UT_ALTSTACK) == 0)
+ uth->uu_sigstk.ss_flags |= SA_DISABLE;
+ onstack = pstk->ss_flags & SA_ONSTACK;
+ if (uap->oss) {
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_sigaltstack ss64;
+ sigaltstack_kern_to_user64(pstk, &ss64);
+ error = copyout(&ss64, uap->oss, sizeof(ss64));
+ } else {
+ struct user32_sigaltstack ss32;
+ sigaltstack_kern_to_user32(pstk, &ss32);
+ error = copyout(&ss32, uap->oss, sizeof(ss32));
+ }
+ if (error)
+ return (error);
+ }
+ if (uap->nss == USER_ADDR_NULL)
+ return (0);
+ if (IS_64BIT_PROCESS(p)) {
+ struct user64_sigaltstack ss64;
+ error = copyin(uap->nss, &ss64, sizeof(ss64));
+ sigaltstack_user64_to_kern(&ss64, &ss);
+ } else {
+ struct user32_sigaltstack ss32;
+ error = copyin(uap->nss, &ss32, sizeof(ss32));
+ sigaltstack_user32_to_kern(&ss32, &ss);
+ }
+ if (error)
+ return (error);
+ if ((ss.ss_flags & ~SA_DISABLE) != 0) {
+ return(EINVAL);
+ }
+
+ if (ss.ss_flags & SA_DISABLE) {
+ /* if we are here we are not in the signal handler, so no need to check */
+ if (uth->uu_sigstk.ss_flags & SA_ONSTACK)
+ return (EINVAL);
+ uth->uu_flag &= ~UT_ALTSTACK;
+ uth->uu_sigstk.ss_flags = ss.ss_flags;
+ return (0);
+ }
+ if (onstack)
+ return (EPERM);
+/* The older minimum stack size was 8K; enforce it to avoid compatibility problems */
+#define OLDMINSIGSTKSZ (8*1024)
+ if (ss.ss_size < OLDMINSIGSTKSZ)
+ return (ENOMEM);
+ uth->uu_flag |= UT_ALTSTACK;
+ uth->uu_sigstk= ss;
+ return (0);
+}
+
+int
+kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
+{
+ proc_t p;
+ kauth_cred_t uc = kauth_cred_get();
+ int posix = uap->posix; /* !0 if posix behaviour desired */
+
+ AUDIT_ARG(pid, uap->pid);
+ AUDIT_ARG(signum, uap->signum);
+
+ if ((u_int)uap->signum >= NSIG)
+ return (EINVAL);