int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
int uthread_get_background_state(uthread_t);
static void do_background_socket(struct proc *p, thread_t thread, int priority);
-static int do_background_thread(struct proc *curp, int priority);
-static int do_background_task(struct proc *curp, int priority);
+static int do_background_thread(struct proc *curp, thread_t thread, int priority);
+static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
+void proc_apply_task_networkbg_internal(proc_t, thread_t);
+void proc_restore_task_networkbg_internal(proc_t, thread_t);
rlim_t maxdmap = MAXDSIZ; /* XXX */
rlim_t maxsmap = MAXSSIZ - PAGE_SIZE; /* XXX */
*/
__private_extern__ int maxfilesperproc = OPEN_MAX; /* per-proc open files limit */
-SYSCTL_INT( _kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW,
+SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED,
&maxprocperuid, 0, "Maximum processes allowed per userid" );
-SYSCTL_INT( _kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
+SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED,
&maxfilesperproc, 0, "Maximum files allowed open per process" );
/* Args and fn for proc_iteration callback used in setpriority */
}
case PRIO_DARWIN_THREAD: {
+ /* process marked for termination: no priority management */
+ if ((curp->p_lflag & P_LPTERMINATE) != 0)
+ return(EINVAL);
/* we currently only support the current thread */
if (uap->who != 0) {
return (EINVAL);
}
- error = do_background_thread(curp, uap->prio);
- (void) do_background_socket(curp, current_thread(), uap->prio);
+ error = do_background_thread(curp, current_thread(), uap->prio);
+ if (!error) {
+ (void) do_background_socket(curp, current_thread(), uap->prio);
+ }
found++;
break;
}
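/*
 * Illustrative userspace sketch (not part of this diff): how the
 * PRIO_DARWIN_THREAD case above is reached. The constants are the Darwin
 * extensions declared in <sys/resource.h>; 'who' must be 0 because only the
 * calling thread may be targeted. The helper name is hypothetical.
 */
#include <sys/resource.h>
#include <errno.h>

static int
background_current_thread(int enable)
{
	/* PRIO_DARWIN_BG backgrounds the calling thread; 0 restores defaults */
	if (setpriority(PRIO_DARWIN_THREAD, 0, enable ? PRIO_DARWIN_BG : 0) == -1)
		return errno;
	return 0;
}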
refheld = 1;
}
- error = do_background_task(p, uap->prio);
- (void) do_background_socket(p, NULL, uap->prio);
+ /* process marked for termination: no priority management */
+ if ((p->p_lflag & P_LPTERMINATE) != 0) {
+ error = EINVAL;
+ } else {
+ error = do_background_proc(curp, p, uap->prio);
+ if (!error) {
+ (void) do_background_socket(p, NULL, uap->prio);
+ }
+ }
found++;
if (refheld != 0)
proc_rele(p);
ucred = kauth_cred_proc_ref(curp);
my_cred = kauth_cred_proc_ref(chgp);
- if (suser(ucred, NULL) && ucred->cr_ruid &&
+ if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
- ucred->cr_ruid != kauth_cred_getuid(my_cred)) {
+ kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
error = EPERM;
goto out;
}
}
static int
-do_background_task(struct proc *p, int priority)
+do_background_proc(struct proc *curp, struct proc *targetp, int priority)
{
int error = 0;
- task_category_policy_data_t info;
-
- /* set the max scheduling priority on the task */
- if (priority & PRIO_DARWIN_BG) {
- info.role = TASK_THROTTLE_APPLICATION;
- } else {
- info.role = TASK_DEFAULT_APPLICATION;
- }
+ kauth_cred_t ucred;
+ kauth_cred_t target_cred;
- error = task_policy_set(p->task,
- TASK_CATEGORY_POLICY,
- (task_policy_t) &info,
- TASK_CATEGORY_POLICY_COUNT);
+ ucred = kauth_cred_get();
+ target_cred = kauth_cred_proc_ref(targetp);
- if (error)
+ if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
+ kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
+ kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred))
+ {
+ error = EPERM;
goto out;
-
- proc_lock(p);
-
- /* mark proc structure as backgrounded */
- if (priority & PRIO_DARWIN_BG) {
- p->p_lflag |= P_LBACKGROUND;
- } else {
- p->p_lflag &= ~P_LBACKGROUND;
}
- /* set or reset the disk I/O priority */
- p->p_iopol_disk = (priority == PRIO_DARWIN_BG ?
- IOPOL_THROTTLE : IOPOL_DEFAULT);
+#if CONFIG_MACF
+ error = mac_proc_check_sched(curp, targetp);
+ if (error)
+ goto out;
+#endif
- proc_unlock(p);
+ if (priority == PRIO_DARWIN_NONUI)
+ error = proc_apply_task_gpuacc(targetp->task, TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS);
+ else
+ error = proc_set_and_apply_bgtaskpolicy(targetp->task, priority);
+ if (error)
+ goto out;
out:
+ kauth_cred_unref(&target_cred);
return (error);
}
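/*
 * Illustrative userspace sketch (not part of this diff): the corresponding
 * entry point for do_background_proc() above. PRIO_DARWIN_PROCESS and
 * PRIO_DARWIN_BG come from <sys/resource.h>; PRIO_DARWIN_NONUI selects the
 * GPU-access denial path above. The helper name is hypothetical.
 */
#include <sys/resource.h>
#include <unistd.h>
#include <errno.h>

static int
background_process(pid_t pid, int enable)
{
	/* 'who' is a pid; 0 means the calling process */
	if (setpriority(PRIO_DARWIN_PROCESS, (id_t)pid,
	    enable ? PRIO_DARWIN_BG : 0) == -1)
		return errno;	/* EPERM/EINVAL mirror the checks above */
	return 0;
}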
struct fileproc *fp;
int i;
- if (priority & PRIO_DARWIN_BG) {
+ if (priority == PRIO_DARWIN_BG) {
/*
* For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
* the sockets with the background flag. There's nothing
}
} else {
- u_int32_t traffic_mgt;
- /*
- * See comments on do_background_thread(). Deregulate network
- * traffics only for setpriority(PRIO_DARWIN_THREAD).
- */
- traffic_mgt = (thread == NULL) ? 0 : TRAFFIC_MGT_SO_BG_REGULATE;
/* disable networking IO throttle.
* NOTE - It is a known limitation of the current design that we
if ((thread) && (sockp->so_background_thread != thread)) {
continue;
}
- socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND | traffic_mgt);
+ socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
sockp->so_background_thread = NULL;
}
proc_fdunlock(p);
* and only TRAFFIC_MGT_SO_BACKGROUND is set via do_background_socket().
*/
static int
-do_background_thread(struct proc *curp __unused, int priority)
+do_background_thread(struct proc *curp __unused, thread_t thread, int priority)
{
- thread_t thread;
struct uthread *ut;
- thread_precedence_policy_data_t policy;
+ int error = 0;
- thread = current_thread();
ut = get_bsdthread_info(thread);
- if ( (priority & PRIO_DARWIN_BG) == 0 ) {
- /* turn off backgrounding of thread */
- if ( (ut->uu_flag & UT_BACKGROUND) == 0 ) {
- /* already off */
- return(0);
- }
-
- /*
- * Clear background bit in thread and disable disk IO
- * throttle as well as network traffic management.
- * The corresponding socket flags for sockets created by
- * this thread will be cleared in do_background_socket().
- */
- ut->uu_flag &= ~(UT_BACKGROUND | UT_BACKGROUND_TRAFFIC_MGT);
- ut->uu_iopol_disk = IOPOL_NORMAL;
-
- /* reset thread priority (we did not save previous value) */
- policy.importance = 0;
- thread_policy_set( thread, THREAD_PRECEDENCE_POLICY,
- (thread_policy_t)&policy,
- THREAD_PRECEDENCE_POLICY_COUNT );
- return(0);
- }
-
- /* background this thread */
- if ( (ut->uu_flag & UT_BACKGROUND) != 0 ) {
- /* already backgrounded */
- return(0);
+ /* Backgrounding is unsupported for threads in vfork */
+ if ( (ut->uu_flag & UT_VFORK) != 0) {
+ return(EPERM);
}
- /*
- * Tag thread as background and throttle disk IO, as well
- * as regulate network traffics. Future sockets created
- * by this thread will have their corresponding socket
- * flags set at socket create time.
- */
- ut->uu_flag |= (UT_BACKGROUND | UT_BACKGROUND_TRAFFIC_MGT);
- ut->uu_iopol_disk = IOPOL_THROTTLE;
-
- policy.importance = INT_MIN;
- thread_policy_set( thread, THREAD_PRECEDENCE_POLICY,
- (thread_policy_t)&policy,
- THREAD_PRECEDENCE_POLICY_COUNT );
-
- /* throttle networking IO happens in socket( ) syscall.
- * If UT_{BACKGROUND,BACKGROUND_TRAFFIC_MGT} is set in the current
- * thread then TRAFFIC_MGT_SO_{BACKGROUND,BG_REGULATE} is set.
- * Existing sockets are taken care of by do_background_socket().
- */
- return(0);
+ error = proc_set_and_apply_bgthreadpolicy(curp->task, thread_tid(thread), priority);
+ return(error);
+
}
-/*
- * If the thread or its proc has been put into the background
- * with setpriority(PRIO_DARWIN_{THREAD,PROCESS}, *, PRIO_DARWIN_BG),
- * report that status.
- *
- * Returns: PRIO_DARWIN_BG if background
- * 0 if foreground
- */
+#if CONFIG_EMBEDDED
+int mach_do_background_thread(thread_t thread, int prio);
+
int
-uthread_get_background_state(uthread_t uth)
+mach_do_background_thread(thread_t thread, int prio)
{
- proc_t p = uth->uu_proc;
- if (p && (p->p_lflag & P_LBACKGROUND))
- return PRIO_DARWIN_BG;
-
- if (uth->uu_flag & UT_BACKGROUND)
- return PRIO_DARWIN_BG;
+ int error = 0;
+ struct proc *curp = NULL;
+ struct proc *targetp = NULL;
+ kauth_cred_t ucred;
+
+ targetp = get_bsdtask_info(get_threadtask(thread));
+ if (!targetp) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ curp = proc_self();
+ if (curp == PROC_NULL) {
+ return KERN_FAILURE;
+ }
+
+ ucred = kauth_cred_proc_ref(curp);
+
+ if (suser(ucred, NULL) && curp != targetp) {
+ error = KERN_PROTECTION_FAILURE;
+ goto out;
+ }
+
+ error = do_background_thread(curp, thread, prio);
+ if (!error) {
+ (void) do_background_socket(curp, thread, prio);
+ } else {
+ if (error == EPERM) {
+ error = KERN_PROTECTION_FAILURE;
+ } else {
+ error = KERN_FAILURE;
+ }
+ }
- return 0;
+out:
+ proc_rele(curp);
+ kauth_cred_unref(&ucred);
+ return error;
}
+#endif /* CONFIG_EMBEDDED */
/*
* Returns: 0 Success
size = round_page_64(limp->rlim_cur);
size -= round_page_64(alimp->rlim_cur);
-#if STACK_GROWTH_UP
- /* go to top of current stack */
- addr = p->user_stack + round_page_64(alimp->rlim_cur);
-#else /* STACK_GROWTH_UP */
addr = p->user_stack - round_page_64(limp->rlim_cur);
-#endif /* STACK_GROWTH_UP */
kr = mach_vm_protect(current_map(),
addr, size,
FALSE, VM_PROT_DEFAULT);
*/
cur_sp = thread_adjuserstack(current_thread(),
0);
-#if STACK_GROWTH_UP
- if (cur_sp >= p->user_stack &&
- cur_sp < (p->user_stack +
- round_page_64(alimp->rlim_cur))) {
- /* current stack pointer is in main stack */
- if (cur_sp >= (p->user_stack +
- round_page_64(limp->rlim_cur))) {
- /*
- * New limit would cause
- * current usage to be invalid:
- * reject new limit.
- */
- error = EINVAL;
- goto out;
- }
- } else {
- /* not on the main stack: reject */
- error = EINVAL;
- goto out;
- }
-
-#else /* STACK_GROWTH_UP */
if (cur_sp <= p->user_stack &&
cur_sp > (p->user_stack -
round_page_64(alimp->rlim_cur))) {
error = EINVAL;
goto out;
}
-#endif /* STACK_GROWTH_UP */
size = round_page_64(alimp->rlim_cur);
size -= round_page_64(limp->rlim_cur);
-#if STACK_GROWTH_UP
- addr = p->user_stack + round_page_64(limp->rlim_cur);
-#else /* STACK_GROWTH_UP */
addr = p->user_stack - round_page_64(alimp->rlim_cur);
-#endif /* STACK_GROWTH_UP */
kr = mach_vm_protect(current_map(),
addr, size,
task = p->task;
if (task) {
- task_basic_info_32_data_t tinfo;
+ mach_task_basic_info_data_t tinfo;
task_thread_times_info_data_t ttimesinfo;
task_events_info_data_t teventsinfo;
mach_msg_type_number_t task_info_count, task_ttimes_count;
mach_msg_type_number_t task_events_count;
struct timeval ut,st;
- task_info_count = TASK_BASIC_INFO_32_COUNT;
- task_info(task, TASK_BASIC2_INFO_32,
+ task_info_count = MACH_TASK_BASIC_INFO_COUNT;
+ task_info(task, MACH_TASK_BASIC_INFO,
(task_info_t)&tinfo, &task_info_count);
ut.tv_sec = tinfo.user_time.seconds;
ut.tv_usec = tinfo.user_time.microseconds;
if (p->p_stats->p_ru.ru_nivcsw < 0)
p->p_stats->p_ru.ru_nivcsw = 0;
- p->p_stats->p_ru.ru_maxrss = tinfo.resident_size;
+ p->p_stats->p_ru.ru_maxrss = tinfo.resident_size_max;
}
}
iopolicysys(__unused struct proc *p, __unused struct iopolicysys_args *uap, __unused int32_t *retval)
{
int error = 0;
- thread_t thread = THREAD_NULL;
- int *policy;
- struct uthread *ut = NULL;
struct _iopol_param_t iop_param;
+ int processwide = 0;
if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0)
- goto exit;
+ goto out;
if (iop_param.iop_iotype != IOPOL_TYPE_DISK) {
error = EINVAL;
- goto exit;
+ goto out;
}
switch (iop_param.iop_scope) {
case IOPOL_SCOPE_PROCESS:
- policy = &p->p_iopol_disk;
+ processwide = 1;
break;
case IOPOL_SCOPE_THREAD:
- thread = current_thread();
- ut = get_bsdthread_info(thread);
- policy = &ut->uu_iopol_disk;
+ processwide = 0;
break;
default:
error = EINVAL;
- goto exit;
+ goto out;
}
switch(uap->cmd) {
case IOPOL_NORMAL:
case IOPOL_THROTTLE:
case IOPOL_PASSIVE:
- proc_lock(p);
- *policy = iop_param.iop_policy;
- proc_unlock(p);
+ case IOPOL_UTILITY:
+ if(processwide != 0)
+ proc_apply_task_diskacc(current_task(), iop_param.iop_policy);
+ else
+ proc_apply_thread_selfdiskacc(iop_param.iop_policy);
+
break;
default:
error = EINVAL;
- goto exit;
+ goto out;
}
break;
+
case IOPOL_CMD_GET:
- switch (*policy) {
- case IOPOL_DEFAULT:
- case IOPOL_NORMAL:
- case IOPOL_THROTTLE:
- case IOPOL_PASSIVE:
- iop_param.iop_policy = *policy;
- break;
- default: // in-kernel
- // this should never happen
- printf("%s: unknown I/O policy %d\n", __func__, *policy);
- // restore to default value
- *policy = IOPOL_DEFAULT;
- iop_param.iop_policy = *policy;
- }
-
+ if(processwide != 0)
+ iop_param.iop_policy = proc_get_task_disacc(current_task());
+ else
+ iop_param.iop_policy = proc_get_thread_selfdiskacc();
+
error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param));
+
break;
default:
error = EINVAL; // unknown command
break;
}
- exit:
+out:
*retval = error;
return (error);
}
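/*
 * Illustrative userspace sketch (not part of this diff): iopolicysys() is
 * normally reached through the setiopolicy_np(3)/getiopolicy_np(3) wrappers
 * declared in <sys/resource.h>, which fill in the _iopol_param_t handled
 * above. The helper name is hypothetical.
 */
#include <sys/resource.h>
#include <errno.h>

static int
throttle_own_disk_io(void)
{
	/* process-wide scope takes the proc_apply_task_diskacc() path above */
	if (setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE) == -1)
		return errno;
	/* read the policy back through the IOPOL_CMD_GET branch */
	return getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS);
}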
boolean_t thread_is_io_throttled(void);
boolean_t
-thread_is_io_throttled(void) {
+thread_is_io_throttled(void)
+{
+ return(proc_get_task_selfdiskacc() == IOPOL_THROTTLE);
+}
- int policy;
- struct uthread *ut;
+void
+proc_apply_task_networkbg(void * bsd_info)
+{
+ proc_t p = PROC_NULL;
+ proc_t curp = (proc_t)bsd_info;
+ pid_t pid;
+
+ pid = curp->p_pid;
+ p = proc_find(pid);
+ if (p != PROC_NULL) {
+ do_background_socket(p, NULL, PRIO_DARWIN_BG);
+ proc_rele(p);
+ }
+}
- ut = get_bsdthread_info(current_thread());
+void
+proc_restore_task_networkbg(void * bsd_info)
+{
+ proc_t p = PROC_NULL;
+ proc_t curp = (proc_t)bsd_info;
+ pid_t pid;
+
+ pid = curp->p_pid;
+ p = proc_find(pid);
+ if (p != PROC_NULL) {
+ do_background_socket(p, NULL, 0);
+ proc_rele(p);
+ }
- if(ut){
- policy = current_proc()->p_iopol_disk;
+}
- if (ut->uu_iopol_disk != IOPOL_DEFAULT)
- policy = ut->uu_iopol_disk;
+void
+proc_set_task_networkbg(void * bsdinfo, int setbg)
+{
+ if (setbg != 0)
+ proc_apply_task_networkbg(bsdinfo);
+ else
+ proc_restore_task_networkbg(bsdinfo);
+}
- if (policy == IOPOL_THROTTLE)
- return TRUE;
+void
+proc_apply_task_networkbg_internal(proc_t p, thread_t thread)
+{
+ if (p != PROC_NULL) {
+ do_background_socket(p, thread, PRIO_DARWIN_BG);
+ }
+}
+void
+proc_restore_task_networkbg_internal(proc_t p, thread_t thread)
+{
+ if (p != PROC_NULL) {
+ do_background_socket(p, thread, 0);
}
- return FALSE;
}
+