+ log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
+ (long)pid, name, (uint32_t)uid);
+ return (1);
+endofstring:
+ log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n",
+ (long)pid, name, (uint32_t)uid);
+ return (1);
+}
+#endif /* CONFIG_COREDUMP */
+
+/* Code Signing related routines */
+
+int
+csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
+{
+ return(csops_internal(uap->pid, uap->ops, uap->useraddr,
+ uap->usersize, USER_ADDR_NULL));
+}
+
+int
+csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
+{
+ if (uap->uaudittoken == USER_ADDR_NULL)
+ return(EINVAL);
+ return(csops_internal(uap->pid, uap->ops, uap->useraddr,
+ uap->usersize, uap->uaudittoken));
+}
+
/*
 * Copy a code-signing blob out to user space.
 *
 * If the user buffer (usize) can hold the whole blob, the blob is
 * copied out.  If the blob exists but the buffer is too short, an
 * 8-byte fake blob header is copied out instead, with the required
 * total length stored in network byte order at offset 4, and ERANGE
 * is returned so the caller can retry with a larger buffer.  A NULL
 * blob is reported as an all-zero header.
 *
 * Returns 0 on success, ERANGE for a short buffer, or a copyout error.
 */
static int
csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
{
	char fakeheader[8] = { 0 };
	int error;

	/* caller must at least be able to receive the 8-byte header */
	if (usize < sizeof(fakeheader))
		return ERANGE;

	/* if no blob, fill in zero header */
	if (NULL == start) {
		start = fakeheader;
		length = sizeof(fakeheader);
	} else if (usize < length) {
		/* ... if input too short, copy out length of entitlement */
		uint32_t length32 = htonl((uint32_t)length);
		memcpy(&fakeheader[4], &length32, sizeof(length32));

		error = copyout(fakeheader, uaddr, sizeof(fakeheader));
		if (error == 0)
			return ERANGE; /* input buffer too short, ERANGE signals that */
		return error;
	}
	return copyout(start, uaddr, length);
}
+
/*
 * csops_internal
 *
 * Worker for csops() and csops_audittoken(): perform code-signing
 * operation 'ops' on process 'pid', exchanging data with user space
 * through uaddr/usersize.  If uaudittoken is non-NULL, the audit
 * token is copied in and its pid/idversion fields must match the
 * target process (otherwise ESRCH).
 *
 * Returns 0 on success or an errno value.
 */
static int
csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
{
	size_t usize = (size_t)CAST_DOWN(size_t, usersize);
	proc_t pt;
	int forself;
	int error;
	vnode_t tvp;
	off_t toff;
	unsigned char cdhash[SHA1_RESULTLEN];
	audit_token_t token;
	unsigned int upid=0, uidversion = 0;

	forself = error = 0;

	/* pid 0 means "this process" */
	if (pid == 0)
		pid = proc_selfpid();
	if (pid == proc_selfpid())
		forself = 1;


	/* query-style ops are open to anyone; mutating ops need self or root */
	switch (ops) {
		case CS_OPS_STATUS:
		case CS_OPS_CDHASH:
		case CS_OPS_PIDOFFSET:
		case CS_OPS_ENTITLEMENTS_BLOB:
		case CS_OPS_IDENTITY:
		case CS_OPS_BLOB:
			break;	/* not restricted to root */
		default:
			if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
				return(EPERM);
			break;
	}

	/* take a ref on the target; released at 'out' or before early returns */
	pt = proc_find(pid);
	if (pt == PROC_NULL)
		return(ESRCH);

	upid = pt->p_pid;
	uidversion = pt->p_idversion;
	if (uaudittoken != USER_ADDR_NULL) {

		error = copyin(uaudittoken, &token, sizeof(audit_token_t));
		if (error != 0)
			goto out;
		/* verify the audit token pid/idversion matches with proc */
		if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
			error = ESRCH;
			goto out;
		}
	}

#if CONFIG_MACF
	/* MAC policy hook: set-type ops vs. get-type ops */
	switch (ops) {
		case CS_OPS_MARKINVALID:
		case CS_OPS_MARKHARD:
		case CS_OPS_MARKKILL:
		case CS_OPS_MARKRESTRICT:
		case CS_OPS_SET_STATUS:
		case CS_OPS_CLEARINSTALLER:
			if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
				goto out;
			break;
		default:
			if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
				goto out;
	}
#endif

	switch (ops) {

		case CS_OPS_STATUS: {
			uint32_t retflags;

			/* snapshot p_csflags plus synthetic enforcement/platform bits */
			proc_lock(pt);
			retflags = pt->p_csflags;
			if (cs_enforcement(pt))
				retflags |= CS_ENFORCEMENT;
			if (csproc_get_platform_binary(pt))
				retflags |= CS_PLATFORM_BINARY;
			if (csproc_get_platform_path(pt))
				retflags |= CS_PLATFORM_PATH;
			proc_unlock(pt);

			if (uaddr != USER_ADDR_NULL)
				error = copyout(&retflags, uaddr, sizeof(uint32_t));
			break;
		}
		case CS_OPS_MARKINVALID:
			proc_lock(pt);
			if ((pt->p_csflags & CS_VALID) == CS_VALID) {	/* is currently valid */
				pt->p_csflags &= ~CS_VALID;	/* set invalid */
				/* if the process opted into CS_KILL, invalidation is fatal */
				if ((pt->p_csflags & CS_KILL) == CS_KILL) {
					pt->p_csflags |= CS_KILLED;
					proc_unlock(pt);
					if (cs_debug) {
						printf("CODE SIGNING: marked invalid by pid %d: "
						       "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
						       proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
					}
					psignal(pt, SIGKILL);
				} else
					proc_unlock(pt);
			} else
				proc_unlock(pt);

			break;

		case CS_OPS_MARKHARD:
			proc_lock(pt);
			pt->p_csflags |= CS_HARD;
			if ((pt->p_csflags & CS_VALID) == 0) {
				/* @@@ allow? reject? kill? @@@ */
				proc_unlock(pt);
				error = EINVAL;
				goto out;
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_MARKKILL:
			proc_lock(pt);
			pt->p_csflags |= CS_KILL;
			/* if already invalid, apply the kill policy immediately */
			if ((pt->p_csflags & CS_VALID) == 0) {
				proc_unlock(pt);
				psignal(pt, SIGKILL);
			} else
				proc_unlock(pt);
			break;

		case CS_OPS_PIDOFFSET:
			/* early return: drop the proc ref before copyout */
			toff = pt->p_textoff;
			proc_rele(pt);
			error = copyout(&toff, uaddr, sizeof(toff));
			return(error);

		case CS_OPS_CDHASH:

			/* pt already holds a reference on its p_textvp */
			tvp = pt->p_textvp;
			toff = pt->p_textoff;

			/* user buffer must be exactly SHA1_RESULTLEN bytes */
			if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
				proc_rele(pt);
				return EINVAL;
			}

			error = vn_getcdhash(tvp, toff, cdhash);
			proc_rele(pt);

			if (error == 0) {
				error = copyout(cdhash, uaddr, sizeof (cdhash));
			}

			return error;

		case CS_OPS_ENTITLEMENTS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);

			/* refuse for processes that are neither valid nor being debugged */
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_entitlements_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_MARKRESTRICT:
			proc_lock(pt);
			pt->p_csflags |= CS_RESTRICT;
			proc_unlock(pt);
			break;

		case CS_OPS_SET_STATUS: {
			uint32_t flags;

			if (usize < sizeof(flags)) {
				error = ERANGE;
				break;
			}

			error = copyin(uaddr, &flags, sizeof(flags));
			if (error)
				break;

			/* only allow setting a subset of all code sign flags */
			flags &=
			    CS_HARD | CS_EXEC_SET_HARD |
			    CS_KILL | CS_EXEC_SET_KILL |
			    CS_RESTRICT |
			    CS_REQUIRE_LV |
			    CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;

			/* flags may only be added while the signature is still valid */
			proc_lock(pt);
			if (pt->p_csflags & CS_VALID)
				pt->p_csflags |= flags;
			else
				error = EINVAL;
			proc_unlock(pt);

			break;
		}
		case CS_OPS_BLOB: {
			void *start;
			size_t length;

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			error = cs_blob_get(pt, &start, &length);
			proc_unlock(pt);
			if (error)
				break;

			error = csops_copy_token(start, length, usize, uaddr);
			break;
		}
		case CS_OPS_IDENTITY: {
			const char *identity;
			uint8_t fakeheader[8];
			uint32_t idlen;
			size_t length;

			/*
			 * Make identity have a blob header to make it
			 * easier on userland to guess the identity
			 * length.
			 */
			if (usize < sizeof(fakeheader)) {
				error = ERANGE;
				break;
			}
			memset(fakeheader, 0, sizeof(fakeheader));

			proc_lock(pt);
			if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) {
				proc_unlock(pt);
				error = EINVAL;
				break;
			}

			identity = cs_identity_get(pt);
			proc_unlock(pt);
			if (identity == NULL) {
				error = ENOENT;
				break;
			}

			length = strlen(identity) + 1; /* include NUL */
			/* header carries total length (header + string) in network order */
			idlen = htonl(length + sizeof(fakeheader));
			memcpy(&fakeheader[4], &idlen, sizeof(idlen));

			error = copyout(fakeheader, uaddr, sizeof(fakeheader));
			if (error)
				break;

			/* ERANGE if the string won't fit after the header */
			if (usize < sizeof(fakeheader) + length)
				error = ERANGE;
			else if (usize > sizeof(fakeheader))
				error = copyout(identity, uaddr + sizeof(fakeheader), length);

			break;
		}

		case CS_OPS_CLEARINSTALLER:
			proc_lock(pt);
			pt->p_csflags &= ~(CS_INSTALLER | CS_EXEC_SET_INSTALLER);
			proc_unlock(pt);
			break;

		default:
			error = EINVAL;
			break;
	}
out:
	proc_rele(pt);
	return(error);
}
+
/*
 * proc_iterate
 *
 * Invoke 'callout' on every process (allproc and/or zombie list,
 * depending on 'flags'), optionally pre-filtered by 'filterfn'.
 *
 * To avoid holding proc_list_lock across callouts, the pids are first
 * snapshotted into a heap-allocated list under the lock, then each pid
 * is re-looked-up (proc_find / proc_find_zombref) without the lock.
 * Callouts return PROC_RETURNED / PROC_CLAIMED to continue, or the
 * *_DONE variants to stop iterating; CLAIMED means the callout took
 * ownership of the reference.
 *
 * Returns 0 on success, 1 if the pid list could not be allocated.
 */
int
proc_iterate(
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pid_t *pid_list;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(callout != NULL);

	/* allocate outside of the proc_list_lock */
	for (;;) {
		proc_list_lock();

		pid_count_available = nprocs;
		assert(pid_count_available > 0);

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			/* buffer is big enough; keep the lock and proceed */
			break;
		}
		proc_list_unlock();

		/* resize and retry: nprocs may change while unlocked */
		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return 1;
		}
		pid_list_size = pid_list_size_needed;
	}

	/* filter pids into pid_list */

	if (flags & PROC_ALLPROCLIST) {
		proc_t p;
		ALLPROC_FOREACH(p) {
			/* ignore processes that are being forked */
			if (p->p_stat == SIDL) {
				continue;
			}
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	if ((pid_count < pid_count_available) &&
		 (flags & PROC_ZOMBPROCLIST))
	{
		proc_t p;
		ZOMBPROC_FOREACH(p) {
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	proc_list_unlock();

	/* call callout on processes in the pid_list */

	for (int i = 0; i < pid_count; i++) {
		proc_t p = proc_find(pid_list[i]);
		if (p) {
			if ((flags & PROC_NOWAITTRANS) == 0) {
				/* wait out any in-flight exec transition */
				proc_transwait(p, 0);
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for pid %d",
						callout_ret, pid_list[i]);
				break;
			}
		} else if (flags & PROC_ZOMBPROCLIST) {
			/* pid may have moved to the zombie list since the snapshot */
			p = proc_find_zombref(pid_list[i]);
			if (!p) {
				continue;
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for zombie pid %d",
						callout_ret, pid_list[i]);
				break;
			}
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return 0;

}
+
/*
 * proc_rebootscan
 *
 * Shutdown-time scan: run 'callout' on each process matching
 * 'filterfn'.  After every callout the scan restarts from the head of
 * allproc, because the list may have changed while proc_list_lock was
 * dropped for the callout; the scan terminates only when a full pass
 * finds no process passing the filter that can be referenced.
 */
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		/* take a ref while still holding the list lock */
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		/* list may have changed while unlocked; start over */
		goto restart_foreach;
	}

	proc_list_unlock();
}
+
/*
 * proc_childrenwalk
 *
 * Invoke 'callout' on each child of 'parent'.  Like proc_iterate(),
 * the children's pids are snapshotted into an allocated list under
 * proc_list_lock, then each is re-looked-up without the lock.
 * Callout return values have the same PROC_RETURNED/CLAIMED[_DONE]
 * semantics as proc_iterate().
 *
 * Returns 0 on success (including no children), 1 on allocation failure.
 */
int
proc_childrenwalk(
	proc_t parent,
	proc_iterate_fn_t callout,
	void *arg)
{
	pid_t *pid_list;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(parent != NULL);
	assert(callout != NULL);

	for (;;) {
		proc_list_lock();

		pid_count_available = parent->p_childrencnt;
		if (pid_count_available == 0) {
			proc_list_unlock();
			return 0;
		}

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		proc_list_unlock();

		/* resize and retry: child count may change while unlocked */
		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return 1;
		}
		pid_list_size = pid_list_size_needed;
	}

	proc_t p;
	PCHILDREN_FOREACH(parent, p) {
		/* skip children still being forked */
		if (p->p_stat == SIDL) {
			continue;
		}

		pid_list[pid_count++] = proc_pid(p);
		if (pid_count >= pid_count_available) {
			break;
		}
	}

	proc_list_unlock();

	for (int i = 0; i < pid_count; i++) {
		p = proc_find(pid_list[i]);
		if (!p) {
			continue;
		}

		int callout_ret = callout(p, arg);

		switch (callout_ret) {
		case PROC_RETURNED_DONE:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED_DONE:
			goto out;

		case PROC_RETURNED:
			proc_rele(p);
			/* FALLTHROUGH */
		case PROC_CLAIMED:
			break;
		default:
			panic("proc_childrenwalk: callout returned %d for pid %d",
					callout_ret, pid_list[i]);
			break;
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return 0;
}
+
+int
+pgrp_iterate(
+ struct pgrp *pgrp,
+ unsigned int flags,
+ proc_iterate_fn_t callout,
+ void * arg,
+ proc_iterate_fn_t filterfn,
+ void * filterarg)
+{
+ pid_t *pid_list;
+ proc_t p;
+ vm_size_t pid_list_size = 0;
+ vm_size_t pid_list_size_needed = 0;
+ int pid_count = 0;
+ int pid_count_available = 0;
+
+ pid_t pgid;
+
+ assert(pgrp != NULL);
+ assert(callout != NULL);
+
+ for (;;) {
+ pgrp_lock(pgrp);
+
+ pid_count_available = pgrp->pg_membercnt;
+ if (pid_count_available == 0) {
+ pgrp_unlock(pgrp);
+ return 0;
+ }
+
+ pid_list_size_needed = pid_count_available * sizeof(pid_t);
+ if (pid_list_size >= pid_list_size_needed) {
+ break;
+ }
+ pgrp_unlock(pgrp);
+
+ if (pid_list_size != 0) {
+ kfree(pid_list, pid_list_size);
+ }
+ pid_list = kalloc(pid_list_size_needed);
+ if (!pid_list) {
+ return 1;
+ }
+ pid_list_size = pid_list_size_needed;
+ }
+
+ pgid = pgrp->pg_id;
+
+ PGMEMBERS_FOREACH(pgrp, p) {
+ if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
+ continue;;
+ }
+ pid_list[pid_count++] = proc_pid(p);
+ if (pid_count >= pid_count_available) {
+ break;
+ }
+ }
+
+ pgrp_unlock(pgrp);
+
+ if (flags & PGRP_DROPREF) {
+ pg_rele(pgrp);
+ }
+
+ for (int i = 0; i< pid_count; i++) {
+ /* do not handle kernproc */
+ if (pid_list[i] == 0) {
+ continue;
+ }
+ p = proc_find(pid_list[i]);
+ if (!p) {
+ continue;
+ }
+ if (p->p_pgrpid != pgid) {
+ proc_rele(p);
+ continue;
+ }
+
+ int callout_ret = callout(p, arg);
+
+ switch (callout_ret) {
+ case PROC_RETURNED:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED:
+ break;
+
+ case PROC_RETURNED_DONE:
+ proc_rele(p);
+ /* FALLTHROUGH */
+ case PROC_CLAIMED_DONE:
+ goto out;
+
+ default:
+ panic("pgrp_iterate: callout returned %d for pid %d",
+ callout_ret, pid_list[i]);
+ }
+ }
+
+out:
+ kfree(pid_list, pid_list_size);
+ return 0;
+}
+
/*
 * pgrp_add
 *
 * Add 'child' to process group 'pgrp', inserting it after 'parent' on
 * the member list when a parent is given.  The caller is expected to
 * hold a reference on pgrp (see comment below about TERMINATE/DEAD).
 * Updates the child's p_pgrp/p_pgrpid/p_listflag under proc_list_lock
 * and the membership list/count under pgrp_lock.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* re-clear TERMINATE in case it was re-set while pgrp_lock was held */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
+
/*
 * pgrp_remove
 *
 * Detach process 'p' from its process group: mark the proc's group
 * fields dead under proc_list_lock, then unlink it from the member
 * list under pgrp_lock.  If this was the last member, the group is
 * deleted (pgdelete_dropref), otherwise the reference obtained via
 * proc_pgrp() is dropped.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	/* takes a reference on the group; dropped below */
	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL");
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* last member left: destroy the group */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
+
+
+/* cannot use proc_pgrp as it maybe stalled */
/* cannot use proc_pgrp as it maybe stalled */
/*
 * pgrp_replace
 *
 * Atomically (with respect to proc_pgrp()/proc_session() readers)
 * move process 'p' from its current process group to 'newpg'.  The
 * P_LIST_PGRPTRANS flag marks the transition so that concurrent
 * lookups block on p_pgrpid until the move completes; waiters are
 * woken at the end via P_LIST_PGRPTRWAIT.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* wait for any transition already in flight */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
	}

	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL");
	/* take a ref on the old group so it stays valid while we unlink */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n");
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	/* unlink from the old group (same logic as pgrp_remove) */
	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	/* link into the new group (same logic as pgrp_add) */
	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When pgrp is being freed , a process can still
	 * request addition using setpgid from bash when
	 * login is terminated (login cycler) return ESRCH
	 * Safe to hold lock due to refcount on pgrp
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process");
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* end the transition and wake any blocked proc_pgrp()/proc_session() */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
+
/* Acquire the process group's member-list mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
+
/* Release the process group's member-list mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
+
/* Acquire the session's mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
+
+
/* Release the session's mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
+
+struct pgrp *
+proc_pgrp(proc_t p)
+{
+ struct pgrp * pgrp;
+
+ if (p == PROC_NULL)
+ return(PGRP_NULL);
+ proc_list_lock();
+
+ while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
+ p->p_listflag |= P_LIST_PGRPTRWAIT;
+ (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
+ }
+
+ pgrp = p->p_pgrp;
+
+ assert(pgrp != NULL);
+
+ if (pgrp != PGRP_NULL) {
+ pgrp->pg_refcount++;
+ if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
+ panic("proc_pgrp: ref being povided for dead pgrp");
+ }
+
+ proc_list_unlock();
+
+ return(pgrp);
+}
+
+struct pgrp *
+tty_pgrp(struct tty * tp)
+{
+ struct pgrp * pg = PGRP_NULL;
+
+ proc_list_lock();
+ pg = tp->t_pgrp;
+
+ if (pg != PGRP_NULL) {
+ if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
+ panic("tty_pgrp: ref being povided for dead pgrp");
+ pg->pg_refcount++;
+ }
+ proc_list_unlock();
+
+ return(pg);
+}
+
+struct session *
+proc_session(proc_t p)
+{
+ struct session * sess = SESSION_NULL;
+
+ if (p == PROC_NULL)
+ return(SESSION_NULL);
+
+ proc_list_lock();
+
+ /* wait during transitions */
+ while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
+ p->p_listflag |= P_LIST_PGRPTRWAIT;
+ (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
+ }
+
+ if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
+ if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
+ panic("proc_session:returning sesssion ref on terminating session");
+ sess->s_count++;
+ }
+ proc_list_unlock();
+ return(sess);
+}
+
/*
 * session_rele
 *
 * Drop a reference on 'sess'.  When the count reaches zero, mark the
 * session terminating/dead, unhash it, destroy its mutex and free it.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session");
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* sanity: nobody may have re-referenced a dying session */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use");
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
+
/*
 * proc_transstart
 *
 * Begin a proc transition (e.g. exec) on 'p', setting P_LINTRANSIT
 * and recording the owning thread.  If a transition is already in
 * flight, blocks until it ends — unless it has been committed
 * (P_LTRANSCOMMIT) or 'non_blocking' is set, in which case EDEADLK
 * is returned.  'locked' indicates the caller already holds the proc
 * lock.  Returns 0 on success.
 */
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		/* NOTE(review): wmesg says "proc_signstart" — possibly stale name */
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
+
/*
 * proc_transcommit
 *
 * Mark the in-flight transition on 'p' as committed (point of no
 * return).  Must be called by the thread that started the transition.
 * Wakes any waiters so they can observe the committed state.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
+
/*
 * proc_transend
 *
 * End the transition on 'p': clear the in-transit and commit flags,
 * release the holder, and wake any threads blocked in
 * proc_transstart()/proc_transwait().
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
+
/*
 * proc_transwait
 *
 * Block until any in-flight transition on 'p' completes.  If the
 * transition has been committed and the caller is 'p' itself, return
 * EDEADLK rather than self-deadlock.  Returns 0 otherwise.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
+
/* Acquire the global proc knote-list mutex. */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
+
/* Release the global proc knote-list mutex. */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
+
/* Post event 'hint' to all knotes attached to process 'p'. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
+
/*
 * proc_knote_drain
 *
 * Detach every knote from 'p' and null out each knote's back-pointer,
 * so no knote references the proc after it is reaped.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
+
/* Set the P_LREGISTER flag on 'p' under the proc lock. */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
+
/* Clear the P_LREGISTER flag on 'p' under the proc lock. */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
+
/* Return the process group id of 'p' (unlocked read). */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
+
+pid_t
+proc_selfpgrpid()
+{
+ return current_proc()->p_pgrpid;
+}
+
+
+/* return control and action states */
+int
+proc_getpcontrol(int pid, int * pcontrolp)
+{
+ proc_t p;
+
+ p = proc_find(pid);
+ if (p == PROC_NULL)
+ return(ESRCH);
+ if (pcontrolp != NULL)
+ *pcontrolp = p->p_pcaction;
+
+ proc_rele(p);
+ return(0);
+}
+
/*
 * proc_dopcontrol
 *
 * Apply the low-swap action configured for process 'p' (throttle,
 * suspend, or kill), but only if no action has been taken yet
 * (PROC_ACTION_STATE == 0).  Marks the action as taken before
 * dropping the proc lock.  Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
+
+
+/*
+ * Resume a throttled or suspended process. This is an internal interface that's only
+ * used by the user level code that presents the GUI when we run out of swap space and
+ * hence is restricted to processes with superuser privileges.
+ */
+
+int
+proc_resetpcontrol(int pid)
+{
+ proc_t p;
+ int pcontrol;
+ int error;
+ proc_t self = current_proc();
+
+ /* if the process has been validated to handle resource control or root is valid one */
+ if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
+ return error;
+
+ p = proc_find(pid);
+ if (p == PROC_NULL)
+ return(ESRCH);
+
+ proc_lock(p);
+
+ pcontrol = PROC_CONTROL_STATE(p);
+
+ if(PROC_ACTION_STATE(p) !=0) {
+ switch(pcontrol) {
+ case P_PCTHROTTLE:
+ PROC_RESETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
+ break;
+
+ case P_PCSUSP:
+ PROC_RESETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
+ task_resume(p->task);
+ break;
+
+ case P_PCKILL:
+ /* Huh? */
+ PROC_SETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
+ break;
+
+ default:
+ proc_unlock(p);
+ }
+
+ } else
+ proc_unlock(p);
+
+ proc_rele(p);
+ return(0);
+}
+
+
+
/*
 * Accumulator filled in by proc_pcontrol_filter() while scanning all
 * processes during a low-on-compressor-space event.
 *
 * pcs_*  — processes that have a control action configured but not yet taken
 * npcs_* — processes with no control action configured
 * apcs_* — processes whose action has already been taken
 * *_max_size/_pid/_uniqueid track the single biggest consumer of
 * compressed pages in each category.
 */
struct no_paging_space
{
	uint64_t	pcs_max_size;
	uint64_t	pcs_uniqueid;
	int		pcs_pid;
	int		pcs_proc_count;
	uint64_t	pcs_total_size;

	uint64_t	npcs_max_size;
	uint64_t	npcs_uniqueid;
	int		npcs_pid;
	int		npcs_proc_count;
	uint64_t	npcs_total_size;

	int		apcs_proc_count;
	uint64_t	apcs_total_size;
};
+
+
/*
 * proc_pcontrol_filter
 *
 * proc_iterate() filter used by no_paging_space_action().  It exists
 * purely for its side effect: it accumulates per-category compressed
 * memory statistics into the no_paging_space struct passed via 'arg'.
 * It always returns 0, so no process is ever passed to the callout.
 */
static int
proc_pcontrol_filter(proc_t p, void *arg)
{
	struct no_paging_space *nps;
	uint64_t	compressed;

	nps = (struct no_paging_space *)arg;

	compressed = get_task_compressed(p->task);

	if (PROC_CONTROL_STATE(p)) {
		if (PROC_ACTION_STATE(p) == 0) {
			/* actionable, action not yet taken: track biggest */
			if (compressed > nps->pcs_max_size) {
				nps->pcs_pid = p->p_pid;
				nps->pcs_uniqueid = p->p_uniqueid;
				nps->pcs_max_size = compressed;
			}
			nps->pcs_total_size += compressed;
			nps->pcs_proc_count++;
		} else {
			/* action already taken */
			nps->apcs_total_size += compressed;
			nps->apcs_proc_count++;
		}
	} else {
		/* no control action configured: track biggest */
		if (compressed > nps->npcs_max_size) {
			nps->npcs_pid = p->p_pid;
			nps->npcs_uniqueid = p->p_uniqueid;
			nps->npcs_max_size = compressed;
		}
		nps->npcs_total_size += compressed;
		nps->npcs_proc_count++;

	}
	return (0);
}
+
+
+static int
+proc_pcontrol_null(__unused proc_t p, __unused void *arg)
+{
+ return(PROC_RETURNED);
+}
+
+
+/*
+ * Deal with the low on compressor pool space condition... this function
+ * gets called when we are approaching the limits of the compressor pool or
+ * we are unable to create a new swap file.
+ * Since this eventually creates a memory deadlock situtation, we need to take action to free up
+ * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
+ * There are 2 categories of processes to deal with. Those that have an action
+ * associated with them by the task itself and those that do not. Actionable
+ * tasks can have one of three categories specified: ones that
+ * can be killed immediately, ones that should be suspended, and ones that should
+ * be throttled. Processes that do not have an action associated with them are normally
+ * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
+ * that only by killing them can we hope to put the system back into a usable state.
+ */
+
+#define NO_PAGING_SPACE_DEBUG 0
+
+extern uint64_t vm_compressor_pages_compressed(void);
+
+struct timeval last_no_space_action = {0, 0};
+
+int
+no_paging_space_action()
+{
+ proc_t p;
+ struct no_paging_space nps;
+ struct timeval now;
+
+ /*
+ * Throttle how often we come through here. Once every 5 seconds should be plenty.
+ */
+ microtime(&now);
+
+ if (now.tv_sec <= last_no_space_action.tv_sec + 5)
+ return (0);
+
+ /*
+ * Examine all processes and find the biggest (biggest is based on the number of pages this
+ * task has in the compressor pool) that has been marked to have some action
+ * taken when swap space runs out... we also find the biggest that hasn't been marked for
+ * action.
+ *
+ * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of
+ * the total number of pages held by the compressor, we go ahead and kill it since no other task
+ * can have any real effect on the situation. Otherwise, we go after the actionable process.
+ */
+ bzero(&nps, sizeof(nps));
+
+ proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
+
+#if NO_PAGING_SPACE_DEBUG
+ printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
+ nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
+ printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
+ nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
+ printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
+ nps.apcs_proc_count, nps.apcs_total_size);
+#endif
+ if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
+ /*
+ * for now we'll knock out any task that has more then 50% of the pages
+ * held by the compressor
+ */
+ if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
+
+ if (nps.npcs_uniqueid == p->p_uniqueid) {
+ /*
+ * verify this is still the same process
+ * in case the proc exited and the pid got reused while
+ * we were finishing the proc_iterate and getting to this point
+ */
+ last_no_space_action = now;
+
+ printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
+ psignal(p, SIGKILL);
+
+ proc_rele(p);
+
+ return (0);
+ }
+
+ proc_rele(p);
+ }
+ }
+
+ if (nps.pcs_max_size > 0) {
+ if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
+
+ if (nps.pcs_uniqueid == p->p_uniqueid) {
+ /*
+ * verify this is still the same process
+ * in case the proc exited and the pid got reused while
+ * we were finishing the proc_iterate and getting to this point
+ */
+ last_no_space_action = now;
+
+ proc_dopcontrol(p);
+
+ proc_rele(p);
+
+ return (1);
+ }
+
+ proc_rele(p);
+ }
+ }
+ last_no_space_action = now;
+
+ printf("low swap: unable to find any eligible processes to take action on\n");
+
+ return (0);
+}
+
/*
 * proc_trace_log system call.
 *
 * Ask the target task (identified by pid AND uniqueid, to guard
 * against pid reuse) to send its trace memory.  Requires the
 * PRIV_PROC_TRACE_INSPECT privilege.
 *
 * Returns 0 on success; EPERM, ENOENT, or EINVAL on failure.
 */
int
proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
{
	int ret = 0;
	proc_t target_proc = PROC_NULL;
	pid_t target_pid = uap->pid;
	uint64_t target_uniqueid = uap->uniqueid;
	task_t target_task = NULL;

	if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
		ret = EPERM;
		goto out;
	}
	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		/* uniqueid must match: rejects a recycled pid */
		if (target_uniqueid != proc_uniqueid(target_proc)) {
			ret = ENOENT;
			goto out;
		}

		target_task = proc_task(target_proc);
		if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
			ret = EINVAL;
			goto out;
		}
	} else
		ret = ENOENT;

out:
	if (target_proc != PROC_NULL)
		proc_rele(target_proc);
	return (ret);
}
+
+#if VM_SCAN_FOR_SHADOW_CHAIN
+extern int vm_map_shadow_max(vm_map_t map);
+int proc_shadow_max(void);
/*
 * proc_shadow_max
 *
 * Walk all processes (under proc_list_lock) and return the maximum
 * VM object shadow-chain depth found across their task maps, as
 * reported by vm_map_shadow_max().
 */
int proc_shadow_max(void)
{
	int		retval, max;
	proc_t		p;
	task_t		task;
	vm_map_t	map;

	max = 0;
	proc_list_lock();
	for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
		/* skip processes still being forked */
		if (p->p_stat == SIDL)
			continue;
		task = p->task;
		if (task == NULL) {
			continue;
		}
		map = get_task_map(task);
		if (map == NULL) {
			continue;
		}
		retval = vm_map_shadow_max(map);
		if (retval > max) {
			max = retval;
		}
	}
	proc_list_unlock();
	return max;
}
+#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
+
+void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid);
+void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid)
+{
+ if (target_proc != NULL) {
+ target_proc->p_responsible_pid = responsible_pid;
+ }
+ return;
+}
+
+int
+proc_chrooted(proc_t p)
+{
+ int retval = 0;
+
+ if (p) {
+ proc_fdlock(p);
+ retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0;
+ proc_fdunlock(p);
+ }
+
+ return retval;
+}
+
+void *
+proc_get_uthread_uu_threadlist(void * uthread_v)
+{
+ uthread_t uth = (uthread_t)uthread_v;
+ return (uth != NULL) ? uth->uu_threadlist : NULL;