+ switch (action) {
+ case 1:
+ uth->uu_flag &= ~UT_CANCELDISABLE;
+ return(0);
+ case 2:
+ uth->uu_flag |= UT_CANCELDISABLE;
+ return(0);
+ case 0:
+ default:
+ /* if the thread is in vfork do not cancel */
+ if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
+ uth->uu_flag &= ~UT_CANCEL;
+ uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
+ return(0);
+ }
+ return(EINVAL);
+ }
+ return(EINVAL);
+}
+
+__attribute__((noreturn))
+void
+__posix_sem_syscall_return(kern_return_t kern_result)
+{
+ int error = 0;
+
+ if (kern_result == KERN_SUCCESS)
+ error = 0;
+ else if (kern_result == KERN_ABORTED)
+ error = EINTR;
+ else if (kern_result == KERN_OPERATION_TIMED_OUT)
+ error = ETIMEDOUT;
+ else
+ error = EINVAL;
+ unix_syscall_return(error);
+ /* does not return */
+}
+
+#if OLD_SEMWAIT_SIGNAL
+/*
+ * Returns: 0 Success
+ * EINTR
+ * ETIMEDOUT
+ * EINVAL
+ * EFAULT if timespec is NULL
+ */
+int
+__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
+ int32_t *retval)
+{
+ __pthread_testcancel(0);
+ return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval));
+}
+
/*
 * Non-cancellation-point body of the old-style semwait_signal syscall.
 * Waits on uap->cond_sem (optionally with a timeout copied in from user
 * space); when uap->mutex_sem is non-zero the wait also signals that
 * semaphore via the combined Mach trap.
 *
 * NOTE: the Mach traps are given __posix_sem_syscall_return as a
 * continuation, so if the thread blocks and is rescheduled the trap
 * completes the syscall itself and control never returns here.
 */
int
__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{

	kern_return_t kern_result;
	int error;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if(uap->timeout) {

		/* Copy in the user timespec using the layout matching the
		 * caller's word size (64- vs 32-bit process). */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			error = copyin(uap->ts, &ts64, sizeof(ts64));
			ts.tv_sec = ts64.tv_sec;
			ts.tv_nsec = ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			error = copyin(uap->ts, &ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}

		/* copyin failure (e.g. bad/NULL uap->ts) propagates EFAULT. */
		if (error) {
			return error;
		}

		/* mach_timespec_t only holds 32 bits of seconds; clamp larger
		 * values and remember the truncation so the KERN_SUCCESS case
		 * below can report EINTR instead of success. */
		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			/* Relative timeout: use the interval as given. */
			then.tv_sec = ts.tv_sec;
			then.tv_nsec = ts.tv_nsec;
		} else {
			/* Absolute deadline: convert to a relative interval
			 * against the current time. */
			nanotime(&now);

			/* if time has elapsed, set time to null timepsec to bailout rightaway */
			if (now.tv_sec == ts.tv_sec ?
			    now.tv_nsec > ts.tv_nsec :
			    now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				then.tv_sec = ts.tv_sec - now.tv_sec;
				then.tv_nsec = ts.tv_nsec - now.tv_nsec;
				/* Borrow a second if the nanosecond
				 * subtraction went negative. */
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		/* mutex_sem == 0 means plain timed wait; otherwise signal
		 * mutex_sem and wait on cond_sem atomically. */
		if (uap->mutex_sem == 0)
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		else
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);

	} else {

		/* No timeout: untimed wait, with or without the signal. */
		if (uap->mutex_sem == 0)
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		else

			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
	}

	/* Map the Mach result to errno (mirrors __posix_sem_syscall_return,
	 * plus the truncated-timeout special case). */
	if (kern_result == KERN_SUCCESS && !truncated_timeout)
		return(0);
	else if (kern_result == KERN_SUCCESS && truncated_timeout)
		return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
	else if (kern_result == KERN_ABORTED)
		return(EINTR);
	else if (kern_result == KERN_OPERATION_TIMED_OUT)
		return(ETIMEDOUT);
	else
		return(EINVAL);
}
+#endif /* OLD_SEMWAIT_SIGNAL*/
+
+/*
+ * Returns: 0 Success
+ * EINTR
+ * ETIMEDOUT
+ * EINVAL
+ * EFAULT if timespec is NULL
+ */
+int
+__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
+ int32_t *retval)
+{
+ __pthread_testcancel(0);
+ return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval));
+}
+
/*
 * Non-cancellation-point body of semwait_signal.  Like the old variant
 * above, but the timeout arrives in-line in the syscall arguments
 * (uap->tv_sec / uap->tv_nsec) instead of via copyin, so the proc
 * argument is unused and no EFAULT path exists here.
 *
 * NOTE: the Mach traps are given __posix_sem_syscall_return as a
 * continuation, so if the thread blocks and is rescheduled the trap
 * completes the syscall itself and control never returns here.
 */
int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{

	kern_return_t kern_result;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if(uap->timeout) {

		/* Timeout is passed directly in the trap args; no copyin. */
		ts.tv_sec = uap->tv_sec;
		ts.tv_nsec = uap->tv_nsec;

		/* mach_timespec_t only holds 32 bits of seconds; clamp larger
		 * values and remember the truncation so the KERN_SUCCESS case
		 * below can report EINTR instead of success. */
		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			/* Relative timeout: use the interval as given. */
			then.tv_sec = ts.tv_sec;
			then.tv_nsec = ts.tv_nsec;
		} else {
			/* Absolute deadline: convert to a relative interval
			 * against the current time. */
			nanotime(&now);

			/* if time has elapsed, set time to null timepsec to bailout rightaway */
			if (now.tv_sec == ts.tv_sec ?
			    now.tv_nsec > ts.tv_nsec :
			    now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				then.tv_sec = ts.tv_sec - now.tv_sec;
				then.tv_nsec = ts.tv_nsec - now.tv_nsec;
				/* Borrow a second if the nanosecond
				 * subtraction went negative. */
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		/* mutex_sem == 0 means plain timed wait; otherwise signal
		 * mutex_sem and wait on cond_sem atomically. */
		if (uap->mutex_sem == 0)
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		else
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);

	} else {

		/* No timeout: untimed wait, with or without the signal. */
		if (uap->mutex_sem == 0)
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		else

			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
	}

	/* Map the Mach result to errno (mirrors __posix_sem_syscall_return,
	 * plus the truncated-timeout special case). */
	if (kern_result == KERN_SUCCESS && !truncated_timeout)
		return(0);
	else if (kern_result == KERN_SUCCESS && truncated_timeout)
		return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */
	else if (kern_result == KERN_ABORTED)
		return(EINTR);
	else if (kern_result == KERN_OPERATION_TIMED_OUT)
		return(ETIMEDOUT);
	else
		return(EINVAL);
}
+
+
+int
+__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
+ __unused int32_t *retval)
+{
+ thread_t target_act;
+ int error = 0;
+ int signum = uap->sig;
+ struct uthread *uth;
+
+ target_act = (thread_t)port_name_to_thread(uap->thread_port);
+
+ if (target_act == THREAD_NULL)
+ return (ESRCH);
+ if ((u_int)signum >= NSIG) {
+ error = EINVAL;
+ goto out;
+ }
+
+ uth = (struct uthread *)get_bsdthread_info(target_act);
+
+ if (uth->uu_flag & UT_NO_SIGMASK) {
+ error = ESRCH;
+ goto out;
+ }
+
+ if (signum)
+ psignal_uthread(target_act, signum);
+out:
+ thread_deallocate(target_act);
+ return (error);
+}
+
+
+int
+__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
+ __unused int32_t *retval)
+{
+ user_addr_t set = uap->set;
+ user_addr_t oset = uap->oset;
+ sigset_t nset;
+ int error = 0;
+ struct uthread *ut;
+ sigset_t oldset;
+
+ ut = (struct uthread *)get_bsdthread_info(current_thread());
+ oldset = ut->uu_sigmask;
+
+ if (set == USER_ADDR_NULL) {
+ /* need only old mask */