+ if (curp != targetp) {
+#if CONFIG_MACF
+ if ((error = mac_proc_check_sched(curp, targetp)))
+ goto out;
+#endif
+ }
+
+ role = proc_get_task_policy(proc_task(targetp), THREAD_NULL,
+ TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
+
+ switch (role) {
+ case TASK_FOREGROUND_APPLICATION:
+ *priority = PRIO_DARWIN_ROLE_UI_FOCAL;
+ break;
+ case TASK_BACKGROUND_APPLICATION:
+ *priority = PRIO_DARWIN_ROLE_UI;
+ break;
+ case TASK_NONUI_APPLICATION:
+ *priority = PRIO_DARWIN_ROLE_NON_UI;
+ break;
+ case TASK_UNSPECIFIED:
+ default:
+ *priority = PRIO_DARWIN_ROLE_DEFAULT;
+ break;
+ }
+
+out:
+ kauth_cred_unref(&target_cred);
+ return (error);
+}
+
+
+/*
+ * get_background_proc
+ *
+ * Report whether targetp currently has the DARWIN_BG task policy set.
+ * The caller must be superuser, have ruid 0, or have an effective/real
+ * uid matching the target's effective uid.
+ *
+ * Returns: 0      Success, *priority filled in
+ *          EPERM  Credential check failed
+ */
+static int
+get_background_proc(struct proc *curp, struct proc *targetp, int *priority)
+{
+	int external = 0;
+	int error = 0;
+	kauth_cred_t ucred, target_cred;
+
+	ucred = kauth_cred_get();
+	target_cred = kauth_cred_proc_ref(targetp);
+
+	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
+	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
+	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
+		error = EPERM;
+		goto out;
+	}
+
+	external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
+
+	/*
+	 * Query the *target's* task.  Previously this read current_task(),
+	 * i.e. the caller's own DARWIN_BG state, which made the targetp
+	 * argument (and the credential check above) meaningless when
+	 * querying another process.
+	 */
+	*priority = proc_get_task_policy(proc_task(targetp), THREAD_NULL, external, TASK_POLICY_DARWIN_BG);
+
+out:
+	kauth_cred_unref(&target_cred);
+	return (error);
+}
+
+/*
+ * do_background_proc
+ *
+ * Enable or disable the task-wide DARWIN_BG policy on targetp.
+ * Caller must be superuser, have ruid 0, or have an effective/real uid
+ * matching the target's effective uid; also subject to the MACF
+ * scheduling hook when CONFIG_MACF is configured.
+ *
+ * Returns: 0      Success (including the ignored PRIO_DARWIN_NONUI case)
+ *          EPERM  Credential check failed
+ *          mac_proc_check_sched:???
+ */
+static int
+do_background_proc(struct proc *curp, struct proc *targetp, int priority)
+{
+#if !CONFIG_MACF
+#pragma unused(curp)
+#endif
+	int error = 0;
+	kauth_cred_t my_cred;
+	kauth_cred_t target_cred;
+	int flavor;
+	int value;
+
+	my_cred = kauth_cred_get();
+	target_cred = kauth_cred_proc_ref(targetp);
+
+	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
+	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
+	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred))
+	{
+		error = EPERM;
+		goto out;
+	}
+
+#if CONFIG_MACF
+	error = mac_proc_check_sched(curp, targetp);
+	if (error)
+		goto out;
+#endif
+
+	if (priority == PRIO_DARWIN_NONUI) {
+		/* ignored for compatibility */
+		goto out;
+	}
+
+	/* TODO: EINVAL if priority is nonzero and not PRIO_DARWIN_BG */
+	value = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE;
+	flavor = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
+
+	proc_set_task_policy(proc_task(targetp), THREAD_NULL, flavor, TASK_POLICY_DARWIN_BG, value);
+
+out:
+	kauth_cred_unref(&target_cred);
+	return (error);
+}
+
+/*
+ * do_background_socket
+ *
+ * Propagate the effective DARWIN_BG socket policy to the process's
+ * open sockets.  thread == THREAD_NULL means the whole process
+ * (PRIO_DARWIN_PROCESS); otherwise only that thread's sockets are
+ * cleared.  Compiled out entirely when SOCKETS is not configured.
+ */
+static void
+do_background_socket(struct proc *p, thread_t thread)
+{
+#if SOCKETS
+	struct filedesc *fdp;
+	struct fileproc *fp;
+	int i, background;
+
+	proc_fdlock(p);
+
+	if (thread != THREAD_NULL)
+		background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG);
+	else
+		background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG);
+
+	if (background && thread != THREAD_NULL) {
+		/*
+		 * Backgrounding a single thread (PRIO_DARWIN_THREAD):
+		 * nothing to mark here.
+		 */
+		proc_fdunlock(p);
+		return;
+	}
+
+	/*
+	 * Walk the fd table once, either tagging every socket with the
+	 * background traffic-management flag or clearing it.
+	 *
+	 * NOTE - It is a known limitation of the current design that when
+	 * clearing we could potentially reset TRAFFIC_MGT_SO_BACKGROUND on
+	 * sockets created by other threads within this process.
+	 */
+	fdp = p->p_fd;
+	for (i = 0; i < fdp->fd_nfiles; i++) {
+		struct socket *sockp;
+
+		fp = fdp->fd_ofiles[i];
+		if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 ||
+		    FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) {
+			continue;
+		}
+		sockp = (struct socket *)fp->f_fglob->fg_data;
+		if (background) {
+			socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
+		} else {
+			/* skip if only clearing this thread's sockets */
+			if ((thread != THREAD_NULL) &&
+			    (sockp->so_background_thread != thread)) {
+				continue;
+			}
+			socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
+		}
+		sockp->so_background_thread = NULL;
+	}
+
+	proc_fdunlock(p);
+#else
+#pragma unused(p, thread)
+#endif
+}
+
+
+/*
+ * do_background_thread
+ *
+ * Enable or disable the DARWIN_BG policy on a single thread.
+ *
+ * Returns: 0      Success
+ *          EPERM  Thread is in vfork, or has static (pinned) sched params
+ *          EIDRM  A QoS policy was present and was stripped to apply BG
+ * XXX - todo - does this need a MACF hook?
+ */
+static int
+do_background_thread(struct proc *curp, thread_t thread, int priority)
+{
+	struct uthread *uth = get_bsdthread_info(thread);
+	int flavor;
+	int value;
+	int result = 0;
+
+	/* Backgrounding is unsupported for threads in vfork */
+	if ((uth->uu_flag & UT_VFORK) != 0)
+		return (EPERM);
+
+	/* ...and for threads whose scheduling parameters are pinned */
+	if (thread_is_static_param(thread))
+		return (EPERM);
+
+	/* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */
+	if (thread_has_qos_policy(thread)) {
+		thread_remove_qos_policy(thread);
+		result = EIDRM;
+	}
+
+	/* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */
+	value = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE;
+	flavor = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
+
+	proc_set_task_policy_thread(curp->task, thread_tid(thread), flavor,
+	                            TASK_POLICY_DARWIN_BG, value);
+
+	return (result);
+}
+
+
+/*
+ * setrlimit
+ *
+ * Copy the new limit in from user space and hand it to dosetrlimit().
+ *
+ * Returns: 0 Success
+ *          copyin:EFAULT
+ *          dosetrlimit:
+ */
+/* ARGSUSED */
+int
+setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval)
+{
+	struct rlimit lim;
+	int error;
+
+	error = copyin(uap->rlp, (caddr_t)&lim, sizeof (struct rlimit));
+	if (error != 0)
+		return (error);
+
+	return (dosetrlimit(p, uap->which, &lim));
+}
+
+/*
+ * Returns: 0 Success
+ * EINVAL
+ * ENOMEM Cannot copy limit structure
+ * suser:EPERM
+ *
+ * Notes: EINVAL is returned both for invalid arguments, and in the
+ * case that the current usage (e.g. RLIMIT_STACK) is already
+ * in excess of the requested limit.
+ */
+int
+dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
+{
+	struct rlimit *alimp;
+	int error;
+	kern_return_t kr;
+	int posix = (which & _RLIMIT_POSIX_FLAG) ? 1 : 0;
+
+	/* Mask out POSIX flag, saved above */
+	which &= ~_RLIMIT_POSIX_FLAG;
+
+	if (which >= RLIM_NLIMITS)
+		return (EINVAL);
+
+	alimp = &p->p_rlimit[which];
+	if (limp->rlim_cur > limp->rlim_max)
+		return EINVAL;
+
+	/* Raising either value above the current hard limit requires superuser. */
+	if (limp->rlim_cur > alimp->rlim_max ||
+	    limp->rlim_max > alimp->rlim_max)
+		if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
+			return (error);
+		}
+
+	/* Serialize concurrent limit updates on this proc. */
+	proc_limitblock(p);
+
+	/* Get a private (unshared) copy of the limit structure to modify. */
+	if ((error = proc_limitreplace(p)) != 0) {
+		proc_limitunblock(p);
+		return(error);
+	}
+
+	/* Re-fetch: proc_limitreplace() may have swapped the limit structure. */
+	alimp = &p->p_rlimit[which];
+
+	switch (which) {
+
+	case RLIMIT_CPU:
+		/* CPU limit is enforced via a task vtimer counting remaining budget. */
+		if (limp->rlim_cur == RLIM_INFINITY) {
+			task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
+			timerclear(&p->p_rlim_cpu);
+		}
+		else {
+			task_absolutetime_info_data_t tinfo;
+			mach_msg_type_number_t count;
+			struct timeval ttv, tv;
+			clock_sec_t tv_sec;
+			clock_usec_t tv_usec;
+
+			/* How much user+system time has the task consumed so far? */
+			count = TASK_ABSOLUTETIME_INFO_COUNT;
+			task_info(p->task, TASK_ABSOLUTETIME_INFO,
+				(task_info_t)&tinfo, &count);
+			absolutetime_to_microtime(tinfo.total_user + tinfo.total_system,
+				&tv_sec, &tv_usec);
+			ttv.tv_sec = tv_sec;
+			ttv.tv_usec = tv_usec;
+
+			/* Remaining budget = new limit (clamped to INT_MAX secs) - used. */
+			tv.tv_sec = (limp->rlim_cur > __INT_MAX__ ? __INT_MAX__ : limp->rlim_cur);
+			tv.tv_usec = 0;
+			timersub(&tv, &ttv, &p->p_rlim_cpu);
+
+			timerclear(&tv);
+			if (timercmp(&p->p_rlim_cpu, &tv, >))
+				task_vtimer_set(p->task, TASK_VTIMER_RLIM);
+			else {
+				/* Budget already exhausted: no timer, signal immediately. */
+				task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
+
+				timerclear(&p->p_rlim_cpu);
+
+				psignal(p, SIGXCPU);
+			}
+		}
+		break;
+
+	case RLIMIT_DATA:
+		/* Silently clamp both values to the platform maximum data size. */
+		if (limp->rlim_cur > maxdmap)
+			limp->rlim_cur = maxdmap;
+		if (limp->rlim_max > maxdmap)
+			limp->rlim_max = maxdmap;
+		break;
+
+	case RLIMIT_STACK:
+		/* Disallow illegal stack size instead of clipping */
+		if (limp->rlim_cur > maxsmap ||
+		    limp->rlim_max > maxsmap) {
+			if (posix) {
+				error = EINVAL;
+				goto out;
+			}
+			else {
+				/*
+				 * 4797860 - workaround poorly written installers by
+				 * doing previous implementation (< 10.5) when caller
+				 * is non-POSIX conforming.
+				 */
+				if (limp->rlim_cur > maxsmap)
+					limp->rlim_cur = maxsmap;
+				if (limp->rlim_max > maxsmap)
+					limp->rlim_max = maxsmap;
+			}
+		}
+
+		/*
+		 * Stack is allocated to the max at exec time with only
+		 * "rlim_cur" bytes accessible. If stack limit is going
+		 * up make more accessible, if going down make inaccessible.
+		 */
+		if (limp->rlim_cur > alimp->rlim_cur) {
+			user_addr_t addr;
+			user_size_t size;
+
+			/* grow stack */
+			size = round_page_64(limp->rlim_cur);
+			size -= round_page_64(alimp->rlim_cur);
+
+			/* Stack grows down: the newly accessible pages sit below the old ones. */
+			addr = p->user_stack - round_page_64(limp->rlim_cur);
+			kr = mach_vm_protect(current_map(),
+					     addr, size,
+					     FALSE, VM_PROT_DEFAULT);
+			if (kr != KERN_SUCCESS) {
+				error = EINVAL;
+				goto out;
+			}
+		} else if (limp->rlim_cur < alimp->rlim_cur) {
+			user_addr_t addr;
+			user_size_t size;
+			user_addr_t cur_sp;
+
+			/* shrink stack */
+
+			/*
+			 * First check if new stack limit would agree
+			 * with current stack usage.
+			 * Get the current thread's stack pointer...
+			 */
+			cur_sp = thread_adjuserstack(current_thread(),
+						     0);
+			if (cur_sp <= p->user_stack &&
+			    cur_sp > (p->user_stack -
+				      round_page_64(alimp->rlim_cur))) {
+				/* stack pointer is in main stack */
+				if (cur_sp <= (p->user_stack -
+					       round_page_64(limp->rlim_cur))) {
+					/*
+					 * New limit would cause
+					 * current usage to be invalid:
+					 * reject new limit.
+					 */
+					error = EINVAL;
+					goto out;
+				}
+			} else {
+				/* not on the main stack: reject */
+				error = EINVAL;
+				goto out;
+			}
+
+			/* Make the now-out-of-bounds tail of the stack inaccessible. */
+			size = round_page_64(alimp->rlim_cur);
+			size -= round_page_64(limp->rlim_cur);
+
+			addr = p->user_stack - round_page_64(alimp->rlim_cur);
+
+			kr = mach_vm_protect(current_map(),
+					     addr, size,
+					     FALSE, VM_PROT_NONE);
+			if (kr != KERN_SUCCESS) {
+				error = EINVAL;
+				goto out;
+			}
+		} else {
+			/* no change ... */
+		}
+		break;
+
+	case RLIMIT_NOFILE:
+		/*
+		 * Only root can set the maxfiles limits, as it is
+		 * systemwide resource. If we are expecting POSIX behavior,
+		 * instead of clamping the value, return EINVAL. We do this
+		 * because historically, people have been able to attempt to
+		 * set RLIM_INFINITY to get "whatever the maximum is".
+		 */
+		if ( kauth_cred_issuser(kauth_cred_get()) ) {
+			if (limp->rlim_cur != alimp->rlim_cur &&
+			    limp->rlim_cur > (rlim_t)maxfiles) {
+				if (posix) {
+					error = EINVAL;
+					goto out;
+				}
+				limp->rlim_cur = maxfiles;
+			}
+			if (limp->rlim_max != alimp->rlim_max &&
+			    limp->rlim_max > (rlim_t)maxfiles)
+				limp->rlim_max = maxfiles;
+		}
+		else {
+			/* Non-root callers are bounded by the per-process cap instead. */
+			if (limp->rlim_cur != alimp->rlim_cur &&
+			    limp->rlim_cur > (rlim_t)maxfilesperproc) {
+				if (posix) {
+					error = EINVAL;
+					goto out;
+				}
+				limp->rlim_cur = maxfilesperproc;
+			}
+			if (limp->rlim_max != alimp->rlim_max &&
+			    limp->rlim_max > (rlim_t)maxfilesperproc)
+				limp->rlim_max = maxfilesperproc;
+		}
+		break;
+
+	case RLIMIT_NPROC:
+		/*
+		 * Only root can set to the maxproc limits, as it is
+		 * systemwide resource; all others are limited to
+		 * maxprocperuid (presumably less than maxproc).
+		 */
+		if ( kauth_cred_issuser(kauth_cred_get()) ) {
+			if (limp->rlim_cur > (rlim_t)maxproc)
+				limp->rlim_cur = maxproc;
+			if (limp->rlim_max > (rlim_t)maxproc)
+				limp->rlim_max = maxproc;
+		}
+		else {
+			if (limp->rlim_cur > (rlim_t)maxprocperuid)
+				limp->rlim_cur = maxprocperuid;
+			if (limp->rlim_max > (rlim_t)maxprocperuid)
+				limp->rlim_max = maxprocperuid;
+		}
+		break;
+
+	case RLIMIT_MEMLOCK:
+		/*
+		 * Tell the Mach VM layer about the new limit value.
+		 */
+
+		vm_map_set_user_wire_limit(current_map(), limp->rlim_cur);
+		break;
+
+	} /* switch... */
+	/* Commit the (possibly clamped) new limits under the proc lock. */
+	proc_lock(p);
+	*alimp = *limp;
+	proc_unlock(p);
+	error = 0;
+out:
+	proc_limitunblock(p);
+	return (error);
+}
+
+/*
+ * getrlimit
+ *
+ * Copy the requested resource limit out to user space.
+ *
+ * Returns: 0 Success
+ *          EINVAL           which is out of range
+ *          copyout:EFAULT
+ */
+/* ARGSUSED */
+int
+getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
+{
+	struct rlimit rl;
+
+	/*
+	 * Strip the POSIX-variant flag up front; only the plain resource
+	 * index matters for a read.
+	 */
+	uap->which &= ~_RLIMIT_POSIX_FLAG;
+	if (uap->which >= RLIM_NLIMITS)
+		return (EINVAL);
+
+	proc_limitget(p, uap->which, &rl);
+	return (copyout((caddr_t)&rl, uap->rlp, sizeof (struct rlimit)));
+}
+
+/*
+ * Transform the running time and tick information in proc p into user,
+ * system, and interrupt time usage.
+ */
+/* No lock on proc is held for this.. */
+void
+calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
+{
+ task_t task;
+
+ timerclear(up);
+ timerclear(sp);
+ if (ip != NULL)
+ timerclear(ip);
+
+ task = p->task;
+ if (task) {
+ mach_task_basic_info_data_t tinfo;
+ task_thread_times_info_data_t ttimesinfo;
+ task_events_info_data_t teventsinfo;
+ mach_msg_type_number_t task_info_count, task_ttimes_count;
+ mach_msg_type_number_t task_events_count;
+ struct timeval ut,st;
+
+ task_info_count = MACH_TASK_BASIC_INFO_COUNT;
+ task_info(task, MACH_TASK_BASIC_INFO,
+ (task_info_t)&tinfo, &task_info_count);
+ ut.tv_sec = tinfo.user_time.seconds;
+ ut.tv_usec = tinfo.user_time.microseconds;
+ st.tv_sec = tinfo.system_time.seconds;
+ st.tv_usec = tinfo.system_time.microseconds;
+ timeradd(&ut, up, up);
+ timeradd(&st, sp, sp);
+
+ task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT;
+ task_info(task, TASK_THREAD_TIMES_INFO,
+ (task_info_t)&ttimesinfo, &task_ttimes_count);