X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..36401178fd6817c043cc00b0c00c7f723e58efae:/bsd/kern/kern_resource.c

diff --git a/bsd/kern/kern_resource.c b/bsd/kern/kern_resource.c
index 21da076ba..5e2f8c171 100644
--- a/bsd/kern/kern_resource.c
+++ b/bsd/kern/kern_resource.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -65,6 +65,12 @@
  *
  *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
  */
+/*
+ * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
 
 #include
 #include
@@ -86,16 +92,27 @@
 #include
 #include
+#include
 #include
 #include
+#include
+#include    /* for thread_policy_set( ) */
+#include
+#include
+
+#include
+#include    /* for absolutetime_to_microtime() */
+#include    /* for TRAFFIC_MGT_SO_BACKGROUND */
+#include    /* for struct socket */
 
 #include
 
 int donice(struct proc *curp, struct proc *chgp, int n);
 int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
+static int do_background_thread(struct proc *curp, int priority);
 
 rlim_t maxdmap = MAXDSIZ;    /* XXX */
-rlim_t maxsmap = MAXSSIZ;    /* XXX */
+rlim_t maxsmap = MAXSSIZ - PAGE_SIZE;    /* XXX */
 
 /*
  * Limits on the number of open files per process, and the number
@@ -103,7 +120,6 @@ rlim_t maxsmap = MAXSSIZ;    /* XXX */
  *
  * Note: would be in kern/subr_param.c in FreeBSD.
  */
-int maxprocperuid = CHILD_MAX;        /* max # of procs per user */
 int maxfilesperproc = OPEN_MAX;       /* per-proc open files limit */
 
 SYSCTL_INT( _kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW,
@@ -112,6 +128,25 @@ SYSCTL_INT( _kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW,
 SYSCTL_INT( _kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
     &maxfilesperproc, 0, "Maximum files allowed open per process" );
 
+/* Args and fn for proc_iteration callback used in setpriority */
+struct puser_nice_args {
+    proc_t curp;
+    int prio;
+    id_t who;
+    int * foundp;
+    int * errorp;
+};
+static int puser_donice_callback(proc_t p, void * arg);
+
+
+/* Args and fn for proc_iteration callback used in setpriority */
+struct ppgrp_nice_args {
+    proc_t curp;
+    int prio;
+    int * foundp;
+    int * errorp;
+};
+static int ppgrp_donice_callback(proc_t p, void * arg);
 
 /*
  * Resource controls and accounting.
@@ -119,46 +154,86 @@ SYSCTL_INT( _kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
 int
 getpriority(struct proc *curp, struct getpriority_args *uap, register_t *retval)
 {
-    register struct proc *p;
-    register int low = PRIO_MAX + 1;
+    struct proc *p;
+    int low = PRIO_MAX + 1;
+    kauth_cred_t my_cred;
 
-    if (uap->who < 0)
+    /* would also test (uap->who < 0), but id_t is unsigned */
+    if (uap->who > 0x7fffffff)
         return (EINVAL);
 
     switch (uap->which) {
 
     case PRIO_PROCESS:
-        if (uap->who == 0)
+        if (uap->who == 0) {
             p = curp;
-        else
-            p = pfind(uap->who);
-        if (p == 0)
-            break;
-        low = p->p_nice;
+            low = p->p_nice;
+        } else {
+            p = proc_find(uap->who);
+            if (p == 0)
+                break;
+            low = p->p_nice;
+            proc_rele(p);
+
+        }
         break;
 
     case PRIO_PGRP: {
-        register struct pgrp *pg;
+        struct pgrp *pg = PGRP_NULL;
 
-        if (uap->who == 0)
-            pg = curp->p_pgrp;
-        else if ((pg = pgfind(uap->who)) == NULL)
+        if (uap->who == 0) {
+            /* returns the pgrp to ref */
+            pg = proc_pgrp(curp);
+        } else if ((pg = pgfind(uap->who)) == PGRP_NULL) {
             break;
+        }
+        /* No need for iteration as it is a simple scan */
+        pgrp_lock(pg);
         for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
             if (p->p_nice < low)
                 low = p->p_nice;
         }
+        pgrp_unlock(pg);
+        pg_rele(pg);
         break;
     }
 
     case PRIO_USER:
         if (uap->who == 0)
             uap->who = kauth_cred_getuid(kauth_cred_get());
-        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
-            if (kauth_cred_getuid(p->p_ucred) == uap->who &&
+
+        proc_list_lock();
+
+        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
+            my_cred = kauth_cred_proc_ref(p);
+            if (kauth_cred_getuid(my_cred) == uap->who &&
                 p->p_nice < low)
                 low = p->p_nice;
+            kauth_cred_unref(&my_cred);
+        }
+
+        proc_list_unlock();
+
+        break;
+
+    case PRIO_DARWIN_THREAD: {
+        thread_t thread;
+        struct uthread *ut;
+
+        /* we currently only support the current thread */
+        if (uap->who != 0) {
+            return (EINVAL);
+        }
+
+        thread = current_thread();
+        ut = get_bsdthread_info(thread);
+
+        low = 0;
+        if ( (ut->uu_flag & UT_BACKGROUND) != 0 ) {
+            low = 1;
+        }
         break;
+    }
 
     default:
         return (EINVAL);
@@ -169,18 +244,69 @@ getpriority(struct proc *curp, struct getpriority_args *uap, register_t *retval)
     return (0);
 }
 
+/* call back function used for proc iteration in PRIO_USER */
+static int
+puser_donice_callback(proc_t p, void * arg)
+{
+    int error, n;
+    struct puser_nice_args * pun = (struct puser_nice_args *)arg;
+    kauth_cred_t my_cred;
+
+    my_cred = kauth_cred_proc_ref(p);
+    if (kauth_cred_getuid(my_cred) == pun->who) {
+        error = donice(pun->curp, p, pun->prio);
+        if (pun->errorp != NULL)
+            *pun->errorp = error;
+        if (pun->foundp != NULL) {
+            n = *pun->foundp;
+            *pun->foundp = n+1;
+        }
+    }
+    kauth_cred_unref(&my_cred);
+
+    return(PROC_RETURNED);
+}
+
+/* call back function used for proc iteration in PRIO_PGRP */
+static int
+ppgrp_donice_callback(proc_t p, void * arg)
+{
+    int error;
+    struct ppgrp_nice_args * pun = (struct ppgrp_nice_args *)arg;
+    int n;
+
+    error = donice(pun->curp, p, pun->prio);
+    if (pun->errorp != NULL)
+        *pun->errorp = error;
+    if (pun->foundp!= NULL) {
+        n = *pun->foundp;
+        *pun->foundp = n+1;
+    }
+
+    return(PROC_RETURNED);
+}
+
+/*
+ * Returns:	0			Success
+ *		EINVAL
+ *		ESRCH
+ *	donice:EPERM
+ *	donice:EACCES
+ */
 /* ARGSUSED */
 int
 setpriority(struct proc *curp, struct setpriority_args *uap, __unused register_t *retval)
 {
-    register struct proc *p;
+    struct proc *p;
     int found = 0, error = 0;
+    int refheld = 0;
 
     AUDIT_ARG(cmd, uap->which);
     AUDIT_ARG(owner, uap->who, 0);
     AUDIT_ARG(value, uap->prio);
 
-    if (uap->who < 0)
+    /* would also test (uap->who < 0), but id_t is unsigned */
+    if (uap->who > 0x7fffffff)
         return (EINVAL);
 
     switch (uap->which) {
@@ -188,38 +314,64 @@ setpriority(struct proc *curp, struct setpriority_args *uap, __unused register_t
 
     case PRIO_PROCESS:
         if (uap->who == 0)
             p = curp;
-        else
-            p = pfind(uap->who);
-        if (p == 0)
-            break;
+        else {
+            p = proc_find(uap->who);
+            if (p == 0)
+                break;
+            refheld = 1;
+        }
         error = donice(curp, p, uap->prio);
         found++;
+        if (refheld != 0)
+            proc_rele(p);
         break;
 
     case PRIO_PGRP: {
-        register struct pgrp *pg;
+        struct pgrp *pg = PGRP_NULL;
+        struct ppgrp_nice_args ppgrp;
 
-        if (uap->who == 0)
-            pg = curp->p_pgrp;
-        else if ((pg = pgfind(uap->who)) == NULL)
+        if (uap->who == 0) {
+            pg = proc_pgrp(curp);
+        } else if ((pg = pgfind(uap->who)) == PGRP_NULL)
             break;
-        for (p = pg->pg_members.lh_first; p != 0;
-            p = p->p_pglist.le_next) {
-            error = donice(curp, p, uap->prio);
-            found++;
-        }
+
+        ppgrp.curp = curp;
+        ppgrp.prio = uap->prio;
+        ppgrp.foundp = &found;
+        ppgrp.errorp = &error;
+
+        /* PGRP_DROPREF drops the reference on process group */
+        pgrp_iterate(pg, PGRP_DROPREF, ppgrp_donice_callback, (void *)&ppgrp, NULL, NULL);
+
         break;
     }
 
-    case PRIO_USER:
+    case PRIO_USER: {
+        struct puser_nice_args punice;
+
         if (uap->who == 0)
             uap->who = kauth_cred_getuid(kauth_cred_get());
-        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
-            if (kauth_cred_getuid(p->p_ucred) == uap->who) {
-                error = donice(curp, p, uap->prio);
-                found++;
-            }
+
+        punice.curp = curp;
+        punice.prio = uap->prio;
+        punice.who = uap->who;
+        punice.foundp = &found;
+        error = 0;
+        punice.errorp = &error;
+        proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL);
+
         break;
+    }
+
+    case PRIO_DARWIN_THREAD: {
+        /* we currently only support the current thread */
+        if (uap->who != 0) {
+            return (EINVAL);
+        }
+        error = do_background_thread(curp, uap->prio);
+        found++;
+        break;
+    }
 
     default:
         return (EINVAL);
@@ -229,32 +381,145 @@ setpriority(struct proc *curp, struct setpriority_args *uap, __unused register_t
     return (error);
 }
 
+
+/*
+ * Returns:	0			Success
+ *		EPERM
+ *		EACCES
+ *	mac_check_proc_sched:???
+ */
 int
-donice(curp, chgp, n)
-    register struct proc *curp, *chgp;
-    register int n;
+donice(struct proc *curp, struct proc *chgp, int n)
 {
-    kauth_cred_t ucred = curp->p_ucred;
+    int error = 0;
+    kauth_cred_t ucred;
+    kauth_cred_t my_cred;
+
+    ucred = kauth_cred_proc_ref(curp);
+    my_cred = kauth_cred_proc_ref(chgp);
 
     if (suser(ucred, NULL) && ucred->cr_ruid &&
-        kauth_cred_getuid(ucred) != kauth_cred_getuid(chgp->p_ucred) &&
-        ucred->cr_ruid != kauth_cred_getuid(chgp->p_ucred))
-        return (EPERM);
+        kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
+        ucred->cr_ruid != kauth_cred_getuid(my_cred)) {
+        error = EPERM;
+        goto out;
+    }
     if (n > PRIO_MAX)
         n = PRIO_MAX;
     if (n < PRIO_MIN)
         n = PRIO_MIN;
-    if (n < chgp->p_nice && suser(ucred, &curp->p_acflag))
-        return (EACCES);
+    if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
+        error = EACCES;
+        goto out;
+    }
+#if CONFIG_MACF
+    error = mac_proc_check_sched(curp, chgp);
+    if (error)
+        goto out;
+#endif
+    proc_lock(chgp);
     chgp->p_nice = n;
+    proc_unlock(chgp);
     (void)resetpriority(chgp);
-    return (0);
+out:
+    kauth_cred_unref(&ucred);
+    kauth_cred_unref(&my_cred);
+    return (error);
+}
+
+/*
+ * do_background_thread
+ * Returns:	0			Success
+ * XXX - todo - does this need a MACF hook?
+ */
+static int
+do_background_thread(struct proc *curp, int priority)
+{
+    int i;
+    thread_t thread;
+    struct uthread *ut;
+    thread_precedence_policy_data_t policy;
+    struct filedesc *fdp;
+    struct fileproc *fp;
+
+    thread = current_thread();
+    ut = get_bsdthread_info(thread);
+
+    if ( (priority & PRIO_DARWIN_BG) == 0 ) {
+        /* turn off backgrounding of thread */
+        if ( (ut->uu_flag & UT_BACKGROUND) == 0 ) {
+            /* already off */
+            return(0);
+        }
+
+        /* clear background bit in thread and disable disk IO throttle */
+        ut->uu_flag &= ~UT_BACKGROUND;
+        ut->uu_iopol_disk = IOPOL_NORMAL;
+
+        /* reset thread priority (we did not save previous value) */
+        policy.importance = 0;
+        thread_policy_set( thread, THREAD_PRECEDENCE_POLICY,
+                           (thread_policy_t)&policy,
+                           THREAD_PRECEDENCE_POLICY_COUNT );
+
+        /* disable networking IO throttle.
+         * NOTE - It is a known limitation of the current design that we
+         * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
+         * sockets created by other threads within this process.
+         */
+        proc_fdlock(curp);
+        fdp = curp->p_fd;
+        for ( i = 0; i < fdp->fd_nfiles; i++ ) {
+            struct socket *sockp;
+
+            fp = fdp->fd_ofiles[ i ];
+            if ( fp == NULL || (fdp->fd_ofileflags[ i ] & UF_RESERVED) != 0 ||
+                 fp->f_fglob->fg_type != DTYPE_SOCKET ) {
+                continue;
+            }
+            sockp = (struct socket *)fp->f_fglob->fg_data;
+            if ( sockp->so_background_thread != thread ) {
+                continue;
+            }
+            sockp->so_traffic_mgt_flags &= ~TRAFFIC_MGT_SO_BACKGROUND;
+            sockp->so_background_thread = NULL;
+        }
+        proc_fdunlock(curp);
+
+        return(0);
+    }
+
+    /* background this thread */
+    if ( (ut->uu_flag & UT_BACKGROUND) != 0 ) {
+        /* already backgrounded */
+        return(0);
+    }
+
+    /* tag thread as background and throttle disk IO */
+    ut->uu_flag |= UT_BACKGROUND;
+    ut->uu_iopol_disk = IOPOL_THROTTLE;
+
+    policy.importance = INT_MIN;
+    thread_policy_set( thread, THREAD_PRECEDENCE_POLICY,
+                       (thread_policy_t)&policy,
+                       THREAD_PRECEDENCE_POLICY_COUNT );
+
+    /* throttle networking IO happens in socket( ) syscall.
+     * If UT_BACKGROUND is set in the current thread then
+     * TRAFFIC_MGT_SO_BACKGROUND socket option is set.
+     */
+    return(0);
 }
 
+/*
+ * Returns:	0			Success
+ *	copyin:EFAULT
+ *	dosetrlimit:
+ */
 /* ARGSUSED */
 int
-setrlimit(struct proc *p, register struct setrlimit_args *uap, __unused register_t *retval)
+setrlimit(struct proc *p, struct setrlimit_args *uap, __unused register_t *retval)
 {
     struct rlimit alim;
     int error;
@@ -262,36 +527,88 @@ setrlimit(struct proc *p, register struct setrlimit_args *uap, __unused register
     if ((error = copyin(uap->rlp, (caddr_t)&alim,
         sizeof (struct rlimit))))
         return (error);
+
     return (dosetrlimit(p, uap->which, &alim));
 }
 
+/*
+ * Returns:	0			Success
+ *		EINVAL
+ *		ENOMEM			Cannot copy limit structure
+ *	suser:EPERM
+ *
+ * Notes:	EINVAL is returned both for invalid arguments, and in the
+ *		case that the current usage (e.g. RLIMIT_STACK) is already
+ *		in excess of the requested limit.
+ */
 int
-dosetrlimit(p, which, limp)
-    struct proc *p;
-    u_int which;
-    struct rlimit *limp;
+dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
 {
-    register struct rlimit *alimp;
+    struct rlimit *alimp;
     int error;
+    kern_return_t kr;
+    int posix = (which & _RLIMIT_POSIX_FLAG) ? 1 : 0;
+
+    /* Mask out POSIX flag, saved above */
+    which &= ~_RLIMIT_POSIX_FLAG;
 
     if (which >= RLIM_NLIMITS)
         return (EINVAL);
+
     alimp = &p->p_rlimit[which];
+    if (limp->rlim_cur > limp->rlim_max)
+        return EINVAL;
+
     if (limp->rlim_cur > alimp->rlim_max ||
         limp->rlim_max > alimp->rlim_max)
-        if ((error = suser(kauth_cred_get(), &p->p_acflag)))
+        if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
             return (error);
-    if (limp->rlim_cur > limp->rlim_max)
-        limp->rlim_cur = limp->rlim_max;
-    if (p->p_limit->p_refcnt > 1 &&
-        (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
-        p->p_limit->p_refcnt--;
-        p->p_limit = limcopy(p->p_limit);
-        alimp = &p->p_rlimit[which];
     }
+
+    proc_limitblock(p);
+
+    if ((error = proc_limitreplace(p)) != 0) {
+        proc_limitunblock(p);
+        return(error);
+    }
+
+    alimp = &p->p_rlimit[which];
+
     switch (which) {
+    case RLIMIT_CPU:
+        if (limp->rlim_cur == RLIM_INFINITY) {
+            task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
+            timerclear(&p->p_rlim_cpu);
+        }
+        else {
+            task_absolutetime_info_data_t tinfo;
+            mach_msg_type_number_t count;
+            struct timeval ttv, tv;
+
+            count = TASK_ABSOLUTETIME_INFO_COUNT;
+            task_info(p->task, TASK_ABSOLUTETIME_INFO,
+                      (task_info_t)&tinfo, &count);
+            absolutetime_to_microtime(tinfo.total_user + tinfo.total_system,
+                                      (uint32_t *) &ttv.tv_sec, (uint32_t *) &ttv.tv_usec);
+
+            tv.tv_sec = (limp->rlim_cur > __INT_MAX__ ? __INT_MAX__ : limp->rlim_cur);
+            tv.tv_usec = 0;
+            timersub(&tv, &ttv, &p->p_rlim_cpu);
+
+            timerclear(&tv);
+            if (timercmp(&p->p_rlim_cpu, &tv, >))
+                task_vtimer_set(p->task, TASK_VTIMER_RLIM);
+            else {
+                task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
+
+                timerclear(&p->p_rlim_cpu);
+
+                psignal(p, SIGXCPU);
+            }
+        }
+        break;
+
     case RLIMIT_DATA:
         if (limp->rlim_cur > maxdmap)
             limp->rlim_cur = maxdmap;
@@ -300,55 +617,162 @@ dosetrlimit(p, which, limp)
         break;
 
     case RLIMIT_STACK:
-        if (limp->rlim_cur > maxsmap)
-            limp->rlim_cur = maxsmap;
-        if (limp->rlim_max > maxsmap)
-            limp->rlim_max = maxsmap;
+        /* Disallow illegal stack size instead of clipping */
+        if (limp->rlim_cur > maxsmap ||
+            limp->rlim_max > maxsmap) {
+            if (posix) {
+                error = EINVAL;
+                goto out;
+            }
+            else {
+                /*
+                 * 4797860 - workaround poorly written installers by
+                 * doing previous implementation (< 10.5) when caller
+                 * is non-POSIX conforming.
+                 */
+                if (limp->rlim_cur > maxsmap)
+                    limp->rlim_cur = maxsmap;
+                if (limp->rlim_max > maxsmap)
+                    limp->rlim_max = maxsmap;
+            }
+        }
+
         /*
          * Stack is allocated to the max at exec time with only
          * "rlim_cur" bytes accessible. If stack limit is going
          * up make more accessible, if going down make inaccessible.
         */
-        if (limp->rlim_cur != alimp->rlim_cur) {
+        if (limp->rlim_cur > alimp->rlim_cur) {
             user_addr_t addr;
             user_size_t size;
 
-            if (limp->rlim_cur > alimp->rlim_cur) {
                 /* grow stack */
                 size = round_page_64(limp->rlim_cur);
                 size -= round_page_64(alimp->rlim_cur);
 
 #if STACK_GROWTH_UP
                 /* go to top of current stack */
-                addr = p->user_stack + alimp->rlim_cur;
-#else STACK_GROWTH_UP
-                addr = p->user_stack - alimp->rlim_cur;
-                addr -= size;
+            addr = p->user_stack + round_page_64(alimp->rlim_cur);
+#else /* STACK_GROWTH_UP */
+            addr = p->user_stack - round_page_64(limp->rlim_cur);
 #endif /* STACK_GROWTH_UP */
-                if (mach_vm_allocate(current_map(),
-                                     &addr, size,
-                                     VM_FLAGS_FIXED) != KERN_SUCCESS)
-                    return(EINVAL);
-            } else {
+            kr = mach_vm_protect(current_map(),
+                                 addr, size,
+                                 FALSE, VM_PROT_DEFAULT);
+            if (kr != KERN_SUCCESS) {
+                error = EINVAL;
+                goto out;
+            }
+        } else if (limp->rlim_cur < alimp->rlim_cur) {
+            user_addr_t addr;
+            user_size_t size;
+            user_addr_t cur_sp;
+            /* shrink stack */
+
+            /*
+             * First check if new stack limit would agree
+             * with current stack usage.
+             * Get the current thread's stack pointer...
+             */
+            cur_sp = thread_adjuserstack(current_thread(),
+                                         0);
+#if STACK_GROWTH_UP
+            if (cur_sp >= p->user_stack &&
+                cur_sp < (p->user_stack +
+                          round_page_64(alimp->rlim_cur))) {
+                /* current stack pointer is in main stack */
+                if (cur_sp >= (p->user_stack +
+                               round_page_64(limp->rlim_cur))) {
+                    /*
+                     * New limit would cause
+                     * current usage to be invalid:
+                     * reject new limit.
+                     */
+                    error = EINVAL;
+                    goto out;
+                }
+            } else {
+                /* not on the main stack: reject */
+                error = EINVAL;
+                goto out;
+            }
+
+#else /* STACK_GROWTH_UP */
+            if (cur_sp <= p->user_stack &&
+                cur_sp > (p->user_stack -
+                          round_page_64(alimp->rlim_cur))) {
+                /* stack pointer is in main stack */
+                if (cur_sp <= (p->user_stack -
+                               round_page_64(limp->rlim_cur))) {
+                    /*
+                     * New limit would cause
+                     * current usage to be invalid:
+                     * reject new limit.
+                     */
+                    error = EINVAL;
+                    goto out;
+                }
+            } else {
+                /* not on the main stack: reject */
+                error = EINVAL;
+                goto out;
+            }
+#endif /* STACK_GROWTH_UP */
+
+            size = round_page_64(alimp->rlim_cur);
+            size -= round_page_64(limp->rlim_cur);
+
+#if STACK_GROWTH_UP
+            addr = p->user_stack + round_page_64(limp->rlim_cur);
+#else /* STACK_GROWTH_UP */
+            addr = p->user_stack - round_page_64(alimp->rlim_cur);
+#endif /* STACK_GROWTH_UP */
+
+            kr = mach_vm_protect(current_map(),
+                                 addr, size,
+                                 FALSE, VM_PROT_NONE);
+            if (kr != KERN_SUCCESS) {
+                error = EINVAL;
+                goto out;
             }
+        } else {
+            /* no change ... */
         }
         break;
 
     case RLIMIT_NOFILE:
         /*
-         * Only root can set the maxfiles limits, as it is systemwide resource
+         * Only root can set the maxfiles limits, as it is
+         * systemwide resource. If we are expecting POSIX behavior,
+         * instead of clamping the value, return EINVAL. We do this
+         * because historically, people have been able to attempt to
+         * set RLIM_INFINITY to get "whatever the maximum is".
         */
         if ( is_suser() ) {
-            if (limp->rlim_cur > maxfiles)
+            if (limp->rlim_cur != alimp->rlim_cur &&
+                limp->rlim_cur > (rlim_t)maxfiles) {
+                if (posix) {
+                    error = EINVAL;
+                    goto out;
+                }
                 limp->rlim_cur = maxfiles;
-            if (limp->rlim_max > maxfiles)
+            }
+            if (limp->rlim_max != alimp->rlim_max &&
+                limp->rlim_max > (rlim_t)maxfiles)
                 limp->rlim_max = maxfiles;
         }
         else {
-            if (limp->rlim_cur > maxfilesperproc)
+            if (limp->rlim_cur != alimp->rlim_cur &&
+                limp->rlim_cur > (rlim_t)maxfilesperproc) {
+                if (posix) {
+                    error = EINVAL;
+                    goto out;
+                }
                 limp->rlim_cur = maxfilesperproc;
-            if (limp->rlim_max > maxfilesperproc)
+            }
+            if (limp->rlim_max != alimp->rlim_max &&
+                limp->rlim_max > (rlim_t)maxfilesperproc)
                 limp->rlim_max = maxfilesperproc;
         }
         break;
@@ -360,31 +784,53 @@ dosetrlimit(p, which, limp)
          * maxprocperuid (presumably less than maxproc).
         */
         if ( is_suser() ) {
-            if (limp->rlim_cur > maxproc)
+            if (limp->rlim_cur > (rlim_t)maxproc)
                 limp->rlim_cur = maxproc;
-            if (limp->rlim_max > maxproc)
+            if (limp->rlim_max > (rlim_t)maxproc)
                 limp->rlim_max = maxproc;
         }
         else {
-            if (limp->rlim_cur > maxprocperuid)
+            if (limp->rlim_cur > (rlim_t)maxprocperuid)
                 limp->rlim_cur = maxprocperuid;
-            if (limp->rlim_max > maxprocperuid)
+            if (limp->rlim_max > (rlim_t)maxprocperuid)
                 limp->rlim_max = maxprocperuid;
         }
         break;
+
+    case RLIMIT_MEMLOCK:
+        /*
+         * Tell the Mach VM layer about the new limit value.
+         */
+
+        vm_map_set_user_wire_limit(current_map(), limp->rlim_cur);
+        break;
     } /* switch... */
+    proc_lock(p);
     *alimp = *limp;
-    return (0);
+    proc_unlock(p);
+    error = 0;
+out:
+    proc_limitunblock(p);
+    return (error);
 }
 
 /* ARGSUSED */
 int
-getrlimit(struct proc *p, register struct getrlimit_args *uap, __unused register_t *retval)
+getrlimit(struct proc *p, struct getrlimit_args *uap, __unused register_t *retval)
 {
+    struct rlimit lim;
+
+    /*
+     * Take out flag now in case we need to use it to trigger variant
+     * behaviour later.
+     */
+    uap->which &= ~_RLIMIT_POSIX_FLAG;
+
     if (uap->which >= RLIM_NLIMITS)
         return (EINVAL);
-    return (copyout((caddr_t)&p->p_rlimit[uap->which],
+    proc_limitget(p, uap->which, &lim);
+    return (copyout((caddr_t)&lim,
         uap->rlp, sizeof (struct rlimit)));
 }
 
@@ -392,12 +838,9 @@ getrlimit(struct proc *p, register struct getrlimit_args *uap, __unused register
  * Transform the running time and tick information in proc p into user,
  * system, and interrupt time usage.
  */
+/* No lock on proc is held for this.. */
 void
-calcru(p, up, sp, ip)
-    register struct proc *p;
-    register struct timeval *up;
-    register struct timeval *sp;
-    register struct timeval *ip;
+calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
 {
     task_t task;
 
@@ -410,12 +853,12 @@ calcru(p, up, sp, ip)
     if (task) {
         task_basic_info_data_t tinfo;
         task_thread_times_info_data_t ttimesinfo;
-        int task_info_stuff, task_ttimes_stuff;
+        mach_msg_type_number_t task_info_stuff, task_ttimes_stuff;
         struct timeval ut,st;
 
         task_info_stuff = TASK_BASIC_INFO_COUNT;
         task_info(task, TASK_BASIC_INFO,
-                  &tinfo, &task_info_stuff);
+                  (task_info_t)&tinfo, &task_info_stuff);
         ut.tv_sec = tinfo.user_time.seconds;
         ut.tv_usec = tinfo.user_time.microseconds;
         st.tv_sec = tinfo.system_time.seconds;
@@ -425,7 +868,7 @@ calcru(p, up, sp, ip)
 
         task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
         task_info(task, TASK_THREAD_TIMES_INFO,
-                  &ttimesinfo, &task_ttimes_stuff);
+                  (task_info_t)&ttimesinfo, &task_ttimes_stuff);
 
         ut.tv_sec = ttimesinfo.user_time.seconds;
         ut.tv_usec = ttimesinfo.user_time.microseconds;
@@ -440,24 +883,35 @@ __private_extern__ void munge_rusage(struct rusage *a_rusage_p, struct user_rusa
 
 /* ARGSUSED */
 int
-getrusage(register struct proc *p, register struct getrusage_args *uap, __unused register_t *retval)
+getrusage(struct proc *p, struct getrusage_args *uap, __unused register_t *retval)
 {
     struct rusage *rup, rubuf;
     struct user_rusage rubuf64;
     size_t retsize = sizeof(rubuf);        /* default: 32 bits */
    caddr_t retbuf = (caddr_t)&rubuf;      /* default: 32 bits */
+    struct timeval utime;
+    struct timeval stime;
+
 
     switch (uap->who) {
     case RUSAGE_SELF:
-        rup = &p->p_stats->p_ru;
-        calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
+        calcru(p, &utime, &stime, NULL);
         // LP64todo: proc struct should have 64 bit version of struct
+        proc_lock(p);
+        rup = &p->p_stats->p_ru;
+        rup->ru_utime = utime;
+        rup->ru_stime = stime;
+
        rubuf = *rup;
+        proc_unlock(p);
+
        break;
 
    case RUSAGE_CHILDREN:
+        proc_lock(p);
        rup = &p->p_stats->p_cru;
        rubuf = *rup;
+        proc_unlock(p);
        break;
 
    default:
@@ -472,11 +926,10 @@ getrusage(register struct proc *p, register struct getrusage_args *uap, __unused
 }
 
 void
-ruadd(ru, ru2)
-    register struct rusage *ru, *ru2;
+ruadd(struct rusage *ru, struct rusage *ru2)
 {
-    register long *ip, *ip2;
-    register int i;
+    long *ip, *ip2;
+    int i;
 
     timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
     timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
@@ -487,24 +940,195 @@ ruadd(ru, ru2)
         *ip++ += *ip2++;
 }
 
-/*
- * Make a copy of the plimit structure.
- * We share these structures copy-on-write after fork,
- * and copy when a limit is changed.
- */
-struct plimit *
-limcopy(lim)
-    struct plimit *lim;
+void
+proc_limitget(proc_t p, int which, struct rlimit * limp)
+{
+    proc_list_lock();
+    limp->rlim_cur = p->p_rlimit[which].rlim_cur;
+    limp->rlim_max = p->p_rlimit[which].rlim_max;
+    proc_list_unlock();
+}
+
+
+void
+proc_limitdrop(proc_t p, int exiting)
+{
+    struct plimit * freelim = NULL;
+    struct plimit * freeoldlim = NULL;
+
+    proc_list_lock();
+
+    if (--p->p_limit->pl_refcnt == 0) {
+        freelim = p->p_limit;
+        p->p_limit = NULL;
+    }
+    if ((exiting != 0) && (p->p_olimit != NULL) && (--p->p_olimit->pl_refcnt == 0)) {
+        freeoldlim = p->p_olimit;
+        p->p_olimit = NULL;
+    }
+
+    proc_list_unlock();
+    if (freelim != NULL)
+        FREE_ZONE(freelim, sizeof *p->p_limit, M_PLIMIT);
+    if (freeoldlim != NULL)
+        FREE_ZONE(freeoldlim, sizeof *p->p_olimit, M_PLIMIT);
+}
+
+
+void
+proc_limitfork(proc_t parent, proc_t child)
 {
-    register struct plimit *copy;
+    proc_list_lock();
+    child->p_limit = parent->p_limit;
+    child->p_limit->pl_refcnt++;
+    child->p_olimit = NULL;
+    proc_list_unlock();
+}
+
+void
+proc_limitblock(proc_t p)
+{
+    proc_lock(p);
+    while (p->p_lflag & P_LLIMCHANGE) {
+        p->p_lflag |= P_LLIMWAIT;
+        msleep(&p->p_olimit, &p->p_mlock, 0, "proc_limitblock", NULL);
+    }
+    p->p_lflag |= P_LLIMCHANGE;
+    proc_unlock(p);
+
+}
+
+
+void
+proc_limitunblock(proc_t p)
+{
+    proc_lock(p);
+    p->p_lflag &= ~P_LLIMCHANGE;
+    if (p->p_lflag & P_LLIMWAIT) {
+        p->p_lflag &= ~P_LLIMWAIT;
+        wakeup(&p->p_olimit);
+    }
+    proc_unlock(p);
+}
+
+/* This is called behind serialization provided by proc_limitblock/unlbock */
+int
+proc_limitreplace(proc_t p)
+{
+    struct plimit *copy;
+
+
+    proc_list_lock();
+
+    if (p->p_limit->pl_refcnt == 1) {
+        proc_list_unlock();
+        return(0);
+    }
+
+    proc_list_unlock();
 
     MALLOC_ZONE(copy, struct plimit *,
-            sizeof(struct plimit), M_SUBPROC, M_WAITOK);
-    if (copy == NULL)
-        panic("limcopy");
-    bcopy(lim->pl_rlimit, copy->pl_rlimit,
+            sizeof(struct plimit), M_PLIMIT, M_WAITOK);
+    if (copy == NULL) {
+        return(ENOMEM);
+    }
+
+    proc_list_lock();
+    bcopy(p->p_limit->pl_rlimit, copy->pl_rlimit,
         sizeof(struct rlimit) * RLIM_NLIMITS);
-    copy->p_lflags = 0;
-    copy->p_refcnt = 1;
-    return (copy);
+    copy->pl_refcnt = 1;
+    /* hang on to reference to old till process exits */
+    p->p_olimit = p->p_limit;
+    p->p_limit = copy;
+    proc_list_unlock();
+
+    return(0);
+}
+
+
+/*
+ * iopolicysys
+ *
+ * Description:	System call MUX for use in manipulating I/O policy attributes of the current process or thread
+ *
+ * Parameters:	cmd			Policy command
+ *		arg			Pointer to policy arguments
+ *
+ * Returns:	0			Success
+ *		EINVAL			Invalid command or invalid policy arguments
+ *
+ */
+int
+iopolicysys(__unused struct proc *p, __unused struct iopolicysys_args *uap, __unused register_t *retval)
+{
+    int error = 0;
+    thread_t thread = THREAD_NULL;
+    int *policy;
+    struct uthread *ut = NULL;
+    struct _iopol_param_t iop_param;
+
+    if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0)
+        goto exit;
+
+    if (iop_param.iop_iotype != IOPOL_TYPE_DISK) {
+        error = EINVAL;
+        goto exit;
+    }
+
+    switch (iop_param.iop_scope) {
+    case IOPOL_SCOPE_PROCESS:
+        policy = &p->p_iopol_disk;
+        break;
+    case IOPOL_SCOPE_THREAD:
+        thread = current_thread();
+        ut = get_bsdthread_info(thread);
+        policy = &ut->uu_iopol_disk;
+        break;
+    default:
+        error = EINVAL;
+        goto exit;
+    }
+
+    switch(uap->cmd) {
+    case IOPOL_CMD_SET:
+        switch (iop_param.iop_policy) {
+        case IOPOL_DEFAULT:
+        case IOPOL_NORMAL:
+        case IOPOL_THROTTLE:
+        case IOPOL_PASSIVE:
+            proc_lock(p);
+            *policy = iop_param.iop_policy;
+            proc_unlock(p);
+            break;
+        default:
+            error = EINVAL;
+            goto exit;
+        }
+        break;
+    case IOPOL_CMD_GET:
+        switch (*policy) {
+        case IOPOL_DEFAULT:
+        case IOPOL_NORMAL:
+        case IOPOL_THROTTLE:
+        case IOPOL_PASSIVE:
+            iop_param.iop_policy = *policy;
+            break;
+        default: // in-kernel
+            // this should never happen
+            printf("%s: unknown I/O policy %d\n", __func__, *policy);
+            // restore to default value
+            *policy = IOPOL_DEFAULT;
+            iop_param.iop_policy = *policy;
+        }
+
+        error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param));
+        break;
+    default:
+        error = EINVAL; // unknown command
+        break;
+    }
+
+  exit:
+    *retval = error;
+    return (error);
 }
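
For reference, a minimal user-space sketch of how the facilities added by this change are reached from a program. It is not part of the diff above; it assumes the Darwin libc wrappers and constants exposed through <sys/resource.h> on Mac OS X 10.5 and later (setpriority()/getpriority() with PRIO_DARWIN_THREAD and PRIO_DARWIN_BG, and setiopolicy_np()/getiopolicy_np()), which land in setpriority(), getpriority() and iopolicysys() in kern_resource.c.

/*
 * Sketch only: exercises the thread-backgrounding and disk I/O policy
 * paths added in this change, assuming the user-space wrappers below
 * exist as declared in <sys/resource.h>.
 */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/resource.h>

int
main(void)
{
    /* Tag the calling thread as background; in the kernel this reaches
     * do_background_thread(), which throttles disk and socket I/O. */
    if (setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG) == -1)
        fprintf(stderr, "setpriority: %s\n", strerror(errno));

    /* getpriority(PRIO_DARWIN_THREAD, 0) reports 1 while the thread
     * carries the background flag, 0 otherwise. */
    printf("thread background state: %d\n",
        getpriority(PRIO_DARWIN_THREAD, 0));

    /* Select the throttled disk I/O policy for the whole process;
     * this funnels into iopolicysys() with IOPOL_CMD_SET. */
    if (setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS,
        IOPOL_THROTTLE) == -1)
        fprintf(stderr, "setiopolicy_np: %s\n", strerror(errno));

    printf("process disk I/O policy: %d\n",
        getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS));

    /* Undo the per-thread backgrounding. */
    (void)setpriority(PRIO_DARWIN_THREAD, 0, 0);
    return 0;
}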