+ log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n",
+ (long)pid, name, (uint32_t)uid);
+ return (1);
+}
+
+#if CONFIG_LCTX
+
+static void
+lctxinit(void)
+{
+ LIST_INIT(&alllctx);
+ alllctx_cnt = 0;
+
+ /* allocate lctx lock group attribute and group */
+ lctx_lck_grp_attr = lck_grp_attr_alloc_init();
+ lck_grp_attr_setstat(lctx_lck_grp_attr);
+
+ lctx_lck_grp = lck_grp_alloc_init("lctx", lctx_lck_grp_attr);
+ /* Allocate lctx lock attribute */
+ lctx_lck_attr = lck_attr_alloc_init();
+
+ lck_mtx_init(&alllctx_lock, lctx_lck_grp, lctx_lck_attr);
+}
+
+/*
+ * Locate login context by number. Returns with the lctx
+ * locked if found; the caller must LCTX_UNLOCK it.
+ */
+struct lctx *
+lcfind(pid_t lcid)
+{
+ struct lctx *l;
+
+ ALLLCTX_LOCK;
+ LIST_FOREACH(l, &alllctx, lc_list) {
+ if (l->lc_id == lcid) {
+ LCTX_LOCK(l);
+ break;
+ }
+ }
+ ALLLCTX_UNLOCK;
+ return (l);
+}
+
+#define LCID_INC \
+ do { \
+ lastlcid++; \
+ if (lastlcid > maxlcid) \
+ lastlcid = 1; \
+ } while (0)
+
+struct lctx *
+lccreate(void)
+{
+ struct lctx *l;
+ pid_t newlcid;
+
+ /* Not very efficient but this isn't a common operation. */
+ while ((l = lcfind(lastlcid)) != NULL) {
+ LCTX_UNLOCK(l);
+ LCID_INC;
+ }
+ newlcid = lastlcid;
+ LCID_INC;
+
+ MALLOC(l, struct lctx *, sizeof(struct lctx), M_LCTX, M_WAITOK|M_ZERO);
+ l->lc_id = newlcid;
+ LIST_INIT(&l->lc_members);
+ lck_mtx_init(&l->lc_mtx, lctx_lck_grp, lctx_lck_attr);
+#if CONFIG_MACF
+ l->lc_label = mac_lctx_label_alloc();
+#endif
+ ALLLCTX_LOCK;
+ LIST_INSERT_HEAD(&alllctx, l, lc_list);
+ alllctx_cnt++;
+ ALLLCTX_UNLOCK;
+
+ return (l);
+}
+
+/*
+ * Call with proc protected (either by being invisible
+ * or by having the all-login-context lock held) and
+ * the lctx locked.
+ *
+ * Will unlock lctx on return.
+ */
+void
+enterlctx(proc_t p, struct lctx *l, __unused int create)
+{
+ if (l == NULL)
+ return;
+
+ p->p_lctx = l;
+ LIST_INSERT_HEAD(&l->lc_members, p, p_lclist);
+ l->lc_mc++;
+
+#if CONFIG_MACF
+ if (create)
+ mac_lctx_notify_create(p, l);
+ else
+ mac_lctx_notify_join(p, l);
+#endif
+ LCTX_UNLOCK(l);
+
+ return;
+}
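+
+/*
+ * Usage sketch (illustrative only, not part of this change): a caller
+ * joining a process to a login context either looks one up with
+ * lcfind(), which returns it LCTX_LOCK'ed, or creates a fresh one
+ * with lccreate() (returned unlocked) and locks it, then hands the
+ * locked lctx to enterlctx(), which drops the lock before returning:
+ *
+ *   struct lctx *l = lcfind(lcid);
+ *   int created = 0;
+ *   if (l == NULL) {
+ *       l = lccreate();
+ *       LCTX_LOCK(l);
+ *       created = 1;
+ *   }
+ *   enterlctx(p, l, created);
+ */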
+
+/*
+ * Remove process from login context (if any). Called with p protected by
+ * the alllctx lock.
+ */
+void
+leavelctx(proc_t p)
+{
+ struct lctx *l;
+
+ if (p->p_lctx == NULL) {
+ return;
+ }
+
+ LCTX_LOCK(p->p_lctx);
+ l = p->p_lctx;
+ p->p_lctx = NULL;
+ LIST_REMOVE(p, p_lclist);
+ l->lc_mc--;
+#if CONFIG_MACF
+ mac_lctx_notify_leave(p, l);
+#endif
+ if (LIST_EMPTY(&l->lc_members)) {
+ LIST_REMOVE(l, lc_list);
+ alllctx_cnt--;
+ LCTX_UNLOCK(l);
+ lck_mtx_destroy(&l->lc_mtx, lctx_lck_grp);
+#if CONFIG_MACF
+ mac_lctx_label_free(l->lc_label);
+ l->lc_label = NULL;
+#endif
+ FREE(l, M_LCTX);
+ } else {
+ LCTX_UNLOCK(l);
+ }
+ return;
+}
+
+static int
+sysctl_kern_lctx SYSCTL_HANDLER_ARGS
+{
+ int *name = (int*) arg1;
+ u_int namelen = arg2;
+ struct kinfo_lctx kil;
+ struct lctx *l;
+ int error;
+
+ error = 0;
+
+ switch (oidp->oid_number) {
+ case KERN_LCTX_ALL:
+ ALLLCTX_LOCK;
+ /* Request for size. */
+ if (!req->oldptr) {
+ error = SYSCTL_OUT(req, 0,
+ sizeof(struct kinfo_lctx) * (alllctx_cnt + 1));
+ goto out;
+ }
+ break;
+
+ case KERN_LCTX_LCID:
+ /* No space */
+ if (req->oldlen < sizeof(struct kinfo_lctx))
+ return (ENOMEM);
+ /* No argument */
+ if (namelen != 1)
+ return (EINVAL);
+ /* No login context */
+ l = lcfind((pid_t)name[0]);
+ if (l == NULL)
+ return (ENOENT);
+ kil.id = l->lc_id;
+ kil.mc = l->lc_mc;
+ LCTX_UNLOCK(l);
+ return (SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil)));
+
+ default:
+ return (EINVAL);
+ }
+
+ /* Provided buffer is too small. */
+ if (req->oldlen < (sizeof(struct kinfo_lctx) * alllctx_cnt)) {
+ error = ENOMEM;
+ goto out;
+ }
+
+ LIST_FOREACH(l, &alllctx, lc_list) {
+ LCTX_LOCK(l);
+ kil.id = l->lc_id;
+ kil.mc = l->lc_mc;
+ LCTX_UNLOCK(l);
+ error = SYSCTL_OUT(req, (caddr_t)&kil, sizeof(kil));
+ if (error)
+ break;
+ }
+out:
+ ALLLCTX_UNLOCK;
+
+ return (error);
+}
+
+SYSCTL_NODE(_kern, KERN_LCTX, lctx, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Login Context");
+
+SYSCTL_PROC(_kern_lctx, KERN_LCTX_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT | CTLFLAG_LOCKED,
+ 0, 0, sysctl_kern_lctx, "S,lctx",
+ "Return entire login context table");
+SYSCTL_NODE(_kern_lctx, KERN_LCTX_LCID, lcid, CTLFLAG_RD | CTLFLAG_LOCKED,
+ sysctl_kern_lctx, "Login Context Table");
+SYSCTL_INT(_kern_lctx, OID_AUTO, last, CTLFLAG_RD | CTLFLAG_LOCKED, &lastlcid, 0, "");
+SYSCTL_INT(_kern_lctx, OID_AUTO, count, CTLFLAG_RD | CTLFLAG_LOCKED, &alllctx_cnt, 0, "");
+SYSCTL_INT(_kern_lctx, OID_AUTO, max, CTLFLAG_RW | CTLFLAG_LOCKED, &maxlcid, 0, "");
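+
+/*
+ * Userland view (sketch, assuming struct kinfo_lctx is exported to
+ * user headers): the KERN_LCTX_ALL handler above implements the usual
+ * two-call sysctl pattern: probe with a NULL buffer to learn a safe
+ * size (it reports room for alllctx_cnt + 1 entries), then fetch:
+ *
+ *   int mib[3] = { CTL_KERN, KERN_LCTX, KERN_LCTX_ALL };
+ *   size_t len = 0;
+ *   sysctl(mib, 3, NULL, &len, NULL, 0);          // size probe
+ *   struct kinfo_lctx *buf = malloc(len);
+ *   sysctl(mib, 3, buf, &len, NULL, 0);           // fetch table
+ */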
+
+#endif /* CONFIG_LCTX */
+
+/* Code Signing related routines */
+
+int
+csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval)
+{
+ return(csops_internal(uap->pid, uap->ops, uap->useraddr,
+ uap->usersize, USER_ADDR_NULL));
+}
+
+int
+csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval)
+{
+ if (uap->uaudittoken == USER_ADDR_NULL)
+ return(EINVAL);
+ return(csops_internal(uap->pid, uap->ops, uap->useraddr,
+ uap->usersize, uap->uaudittoken));
+}
+
+static int
+csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr)
+{
+ char fakeheader[8] = { 0 };
+ int error;
+
+ if (usize < sizeof(fakeheader))
+ return ERANGE;
+
+ /* if no blob, fill in zero header */
+ if (NULL == start) {
+ start = fakeheader;
+ length = sizeof(fakeheader);
+ } else if (usize < length) {
+ /* ... if input too short, copy out length of entitlement */
+ uint32_t length32 = htonl((uint32_t)length);
+ memcpy(&fakeheader[4], &length32, sizeof(length32));
+
+ error = copyout(fakeheader, uaddr, sizeof(fakeheader));
+ if (error == 0)
+ return ERANGE; /* input buffer too short, ERANGE signals that */
+ return error;
+ }
+ return copyout(start, uaddr, length);
+}
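+
+/*
+ * The fakeheader above gives userland a cheap sizing protocol: a short
+ * read yields an 8-byte blob header whose second word is the required
+ * length in network byte order. Resulting userland pattern (sketch,
+ * assuming the usual csops(2) wrapper from <sys/codesign.h>):
+ *
+ *   uint8_t hdr[8];
+ *   csops(pid, CS_OPS_ENTITLEMENTS_BLOB, hdr, sizeof(hdr)); // ERANGE if a blob exists
+ *   uint32_t need = ntohl(*(uint32_t *)(hdr + 4));
+ *   void *blob = malloc(need);
+ *   csops(pid, CS_OPS_ENTITLEMENTS_BLOB, blob, need);
+ */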
+
+static int
+csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken)
+{
+ size_t usize = (size_t)CAST_DOWN(size_t, usersize);
+ proc_t pt;
+ int forself;
+ int error;
+ vnode_t tvp;
+ off_t toff;
+ unsigned char cdhash[SHA1_RESULTLEN];
+ audit_token_t token;
+ unsigned int upid = 0, uidversion = 0;
+
+ forself = error = 0;
+
+ if (pid == 0)
+ pid = proc_selfpid();
+ if (pid == proc_selfpid())
+ forself = 1;
+
+
+ switch (ops) {
+ case CS_OPS_STATUS:
+ case CS_OPS_CDHASH:
+ case CS_OPS_PIDOFFSET:
+ case CS_OPS_ENTITLEMENTS_BLOB:
+ case CS_OPS_IDENTITY:
+ case CS_OPS_BLOB:
+ break; /* unrestricted */
+ default:
+ if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
+ return(EPERM);
+ break;
+ }
+
+ pt = proc_find(pid);
+ if (pt == PROC_NULL)
+ return(ESRCH);
+
+ upid = pt->p_pid;
+ uidversion = pt->p_idversion;
+ if (uaudittoken != USER_ADDR_NULL) {
+
+ error = copyin(uaudittoken, &token, sizeof(audit_token_t));
+ if (error != 0)
+ goto out;
+ /* verify the audit token pid/idversion matches with proc */
+ if ((token.val[5] != upid) || (token.val[7] != uidversion)) {
+ error = ESRCH;
+ goto out;
+ }
+ }
+
+ switch (ops) {
+
+ case CS_OPS_STATUS: {
+ uint32_t retflags;
+
+ proc_lock(pt);
+ retflags = pt->p_csflags;
+ if (cs_enforcement(pt))
+ retflags |= CS_ENFORCEMENT;
+ proc_unlock(pt);
+
+ if (uaddr != USER_ADDR_NULL)
+ error = copyout(&retflags, uaddr, sizeof(uint32_t));
+ break;
+ }
+ case CS_OPS_MARKINVALID:
+ proc_lock(pt);
+ if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */
+ pt->p_csflags &= ~CS_VALID; /* set invalid */
+ if ((pt->p_csflags & CS_KILL) == CS_KILL) {
+ pt->p_csflags |= CS_KILLED;
+ proc_unlock(pt);
+ if (cs_debug) {
+ printf("CODE SIGNING: marked invalid by pid %d: "
+ "p=%d[%s] honoring CS_KILL, final status 0x%x\n",
+ proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags);
+ }
+ psignal(pt, SIGKILL);
+ } else
+ proc_unlock(pt);
+ } else
+ proc_unlock(pt);
+
+ break;
+
+ case CS_OPS_MARKHARD:
+ proc_lock(pt);
+ pt->p_csflags |= CS_HARD;
+ if ((pt->p_csflags & CS_VALID) == 0) {
+ /* @@@ allow? reject? kill? @@@ */
+ proc_unlock(pt);
+ error = EINVAL;
+ goto out;
+ } else
+ proc_unlock(pt);
+ break;
+
+ case CS_OPS_MARKKILL:
+ proc_lock(pt);
+ pt->p_csflags |= CS_KILL;
+ if ((pt->p_csflags & CS_VALID) == 0) {
+ proc_unlock(pt);
+ psignal(pt, SIGKILL);
+ } else
+ proc_unlock(pt);
+ break;
+
+ case CS_OPS_PIDOFFSET:
+ toff = pt->p_textoff;
+ proc_rele(pt);
+ error = copyout(&toff, uaddr, sizeof(toff));
+ return(error);
+
+ case CS_OPS_CDHASH:
+
+ /* pt already holds a reference on its p_textvp */
+ tvp = pt->p_textvp;
+ toff = pt->p_textoff;
+
+ if (tvp == NULLVP || usize != SHA1_RESULTLEN) {
+ proc_rele(pt);
+ return EINVAL;
+ }
+
+ error = vn_getcdhash(tvp, toff, cdhash);
+ proc_rele(pt);
+
+ if (error == 0) {
+ error = copyout(cdhash, uaddr, sizeof (cdhash));
+ }
+
+ return error;
+
+ case CS_OPS_ENTITLEMENTS_BLOB: {
+ void *start;
+ size_t length;
+
+ proc_lock(pt);
+
+ if ((pt->p_csflags & CS_VALID) == 0) {
+ proc_unlock(pt);
+ error = EINVAL;
+ break;
+ }
+
+ error = cs_entitlements_blob_get(pt, &start, &length);
+ proc_unlock(pt);
+ if (error)
+ break;
+
+ error = csops_copy_token(start, length, usize, uaddr);
+ break;
+ }
+ case CS_OPS_MARKRESTRICT:
+ proc_lock(pt);
+ pt->p_csflags |= CS_RESTRICT;
+ proc_unlock(pt);
+ break;
+
+ case CS_OPS_SET_STATUS: {
+ uint32_t flags;
+
+ if (usize < sizeof(flags)) {
+ error = ERANGE;
+ break;
+ }
+
+ error = copyin(uaddr, &flags, sizeof(flags));
+ if (error)
+ break;
+
+ /* only allow setting a subset of all code sign flags */
+ flags &=
+ CS_HARD | CS_EXEC_SET_HARD |
+ CS_KILL | CS_EXEC_SET_KILL |
+ CS_RESTRICT |
+ CS_REQUIRE_LV |
+ CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT |
+ CS_ENTITLEMENTS_VALIDATED;
+
+ proc_lock(pt);
+ if (pt->p_csflags & CS_VALID)
+ pt->p_csflags |= flags;
+ else
+ error = EINVAL;
+ proc_unlock(pt);
+
+ break;
+ }
+ case CS_OPS_BLOB: {
+ void *start;
+ size_t length;
+
+ proc_lock(pt);
+ if ((pt->p_csflags & CS_VALID) == 0) {
+ proc_unlock(pt);
+ error = EINVAL;
+ break;
+ }
+
+ error = cs_blob_get(pt, &start, &length);
+ proc_unlock(pt);
+ if (error)
+ break;
+
+ error = csops_copy_token(start, length, usize, uaddr);
+ break;
+ }
+ case CS_OPS_IDENTITY: {
+ const char *identity;
+ uint8_t fakeheader[8];
+ uint32_t idlen;
+ size_t length;
+
+ /*
+ * Make identity have a blob header to make it
+ * easier on userland to guess the identity
+ * length.
+ */
+ if (usize < sizeof(fakeheader)) {
+ error = ERANGE;
+ break;
+ }
+ memset(fakeheader, 0, sizeof(fakeheader));
+
+ proc_lock(pt);
+ if ((pt->p_csflags & CS_VALID) == 0) {
+ proc_unlock(pt);
+ error = EINVAL;
+ break;
+ }
+
+ identity = cs_identity_get(pt);
+ proc_unlock(pt);
+ if (identity == NULL) {
+ error = ENOENT;
+ break;
+ }
+
+ length = strlen(identity) + 1; /* include NUL */
+ idlen = htonl(length + sizeof(fakeheader));
+ memcpy(&fakeheader[4], &idlen, sizeof(idlen));
+
+ error = copyout(fakeheader, uaddr, sizeof(fakeheader));
+ if (error)
+ break;
+
+ if (usize < sizeof(fakeheader) + length)
+ error = ERANGE;
+ else if (usize > sizeof(fakeheader))
+ error = copyout(identity, uaddr + sizeof(fakeheader), length);
+
+ break;
+ }
+
+ case CS_OPS_SIGPUP_INSTALL:
+ error = sigpup_install(uaddr);
+ break;
+
+ case CS_OPS_SIGPUP_DROP:
+ error = sigpup_drop();
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+out:
+ proc_rele(pt);
+ return(error);
+}
+
+int
+proc_iterate(int flags, int (*callout)(proc_t, void *), void *arg,
+    int (*filterfn)(proc_t, void *), void *filterarg)
+{
+ proc_t p;
+ pid_t * pid_list;
+ int count, pidcount, alloc_count, i, retval;
+
+ count = nprocs + 10;
+ if (count > hard_maxproc)
+ count = hard_maxproc;
+ alloc_count = count * sizeof(pid_t);
+ pid_list = (pid_t *)kalloc(alloc_count);
+ bzero(pid_list, alloc_count);
+
+
+ proc_list_lock();
+
+
+ pidcount = 0;
+ if (flags & PROC_ALLPROCLIST) {
+ for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
+ if (p->p_stat == SIDL)
+ continue;
+ if ((filterfn == 0) || (filterfn(p, filterarg) != 0)) {
+ pid_list[pidcount] = p->p_pid;
+ pidcount++;
+ if (pidcount >= count)
+ break;
+ }
+ }
+ }
+ if ((pidcount < count) && (flags & PROC_ZOMBPROCLIST)) {
+ for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
+ if ((filterfn == 0) || (filterfn(p, filterarg) != 0)) {
+ pid_list[pidcount] = p->p_pid;
+ pidcount++;
+ if (pidcount >= count)
+ break;
+ }
+ }
+ }
+
+
+ proc_list_unlock();
+
+
+ for (i = 0; i < pidcount; i++) {
+ p = proc_find(pid_list[i]);
+ if (p) {
+ if ((flags & PROC_NOWAITTRANS) == 0)
+ proc_transwait(p, 0);
+ retval = callout(p, arg);
+
+ switch (retval) {
+ case PROC_RETURNED:
+ proc_rele(p);
+ break;
+ case PROC_RETURNED_DONE:
+ proc_rele(p);
+ goto out;
+ case PROC_CLAIMED_DONE:
+ goto out;
+ case PROC_CLAIMED:
+ default:
+ break;
+ }
+ } else if (flags & PROC_ZOMBPROCLIST) {
+ p = proc_find_zombref(pid_list[i]);
+ if (p != PROC_NULL) {
+ retval = callout(p, arg);
+
+ switch (retval) {
+ case PROC_RETURNED:
+ proc_drop_zombref(p);
+ break;
+ case PROC_RETURNED_DONE:
+ proc_drop_zombref(p);
+ goto out;
+ case PROC_CLAIMED_DONE:
+ goto out;
+ case PROC_CLAIMED:
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+out:
+ kfree(pid_list, alloc_count);
+ return(0);
+
+}
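+
+/*
+ * Caller sketch (illustrative only): visit every process whose p_comm
+ * matches a name. The filter runs under the proc list lock and must
+ * not block; the callout runs with a proc ref held and reports back
+ * through the PROC_* codes.
+ *
+ *   static int
+ *   comm_filter(proc_t p, void *arg)
+ *   {
+ *       return (strncmp(p->p_comm, (const char *)arg, MAXCOMLEN) == 0);
+ *   }
+ *
+ *   static int
+ *   comm_callout(proc_t p, __unused void *arg)
+ *   {
+ *       printf("pid %d\n", p->p_pid);
+ *       return (PROC_RETURNED);
+ *   }
+ *
+ *   proc_iterate(PROC_ALLPROCLIST, comm_callout, NULL,
+ *       comm_filter, (void *)"launchd");
+ */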
+
+
+#if 0
+/* This is for iteration in case of trivial non-blocking callouts */
+int
+proc_scanall(int flags, int (*callout)(proc_t, void *), void *arg)
+{
+ proc_t p;
+ int retval;
+
+
+ proc_list_lock();
+
+
+ if (flags & PROC_ALLPROCLIST) {
+ for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
+ retval = callout(p, arg);
+ if (retval == PROC_RETURNED_DONE)
+ goto out;
+ }
+ }
+ if (flags & PROC_ZOMBPROCLIST) {
+ for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
+ retval = callout(p, arg);
+ if (retval == PROC_RETURNED_DONE)
+ goto out;
+ }
+ }
+out:
+
+ proc_list_unlock();
+
+ return(0);
+}
+#endif
+
+
+int
+proc_rebootscan(int (*callout)(proc_t, void *), void *arg,
+    int (*filterfn)(proc_t, void *), void *filterarg)
+{
+ proc_t p;
+ int lockheld = 0, retval;
+
+ proc_shutdown_exitcount = 0;
+
+ps_allprocscan:
+
+ proc_list_lock();
+
+ lockheld = 1;
+
+ for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
+ if ((filterfn == 0) || (filterfn(p, filterarg) != 0)) {
+ p = proc_ref_locked(p);
+
+ proc_list_unlock();
+ lockheld = 0;
+
+ if (p) {
+ proc_transwait(p, 0);
+ retval = callout(p, arg);
+ proc_rele(p);
+
+ switch (retval) {
+ case PROC_RETURNED_DONE:
+ case PROC_CLAIMED_DONE:
+ goto out;
+ }
+ }
+ goto ps_allprocscan;
+ } /* filter pass */
+ } /* allproc walk thru */
+
+ if (lockheld == 1) {
+ proc_list_unlock();
+ lockheld = 0;
+ }
+
+out:
+ return(0);
+
+}
+
+
+int
+proc_childrenwalk(struct proc *parent, int (*callout)(proc_t, void *), void *arg)
+{
+ struct proc *p;
+ pid_t * pid_list;
+ int count, pidcount, alloc_count, i, retval;
+
+ count = nprocs + 10;
+ if (count > hard_maxproc)
+ count = hard_maxproc;
+ alloc_count = count * sizeof(pid_t);
+ pid_list = (pid_t *)kalloc(alloc_count);
+ bzero(pid_list, alloc_count);
+
+
+ proc_list_lock();
+
+
+ pidcount = 0;
+ for (p = parent->p_children.lh_first; (p != 0); p = p->p_sibling.le_next) {
+ if (p->p_stat == SIDL)
+ continue;
+ pid_list[pidcount] = p->p_pid;
+ pidcount++;
+ if (pidcount >= count)
+ break;
+ }
+ proc_list_unlock();
+
+
+ for (i = 0; i < pidcount; i++) {
+ p = proc_find(pid_list[i]);
+ if (p) {
+ proc_transwait(p, 0);
+ retval = callout(p, arg);
+
+ switch (retval) {
+ case PROC_RETURNED:
+ case PROC_RETURNED_DONE:
+ proc_rele(p);
+ if (retval == PROC_RETURNED_DONE) {
+ goto out;
+ }
+ break;
+
+ case PROC_CLAIMED_DONE:
+ goto out;
+ case PROC_CLAIMED:
+ default:
+ break;
+ }
+ }
+ }
+
+out:
+ kfree(pid_list, alloc_count);
+ return(0);
+
+}
+
+/* PGRP_BLOCKITERATE is not implemented yet */
+int
+pgrp_iterate(struct pgrp *pgrp, int flags, int (*callout)(proc_t, void *),
+    void *arg, int (*filterfn)(proc_t, void *), void *filterarg)
+{
+ proc_t p;
+ pid_t * pid_list;
+ int count, pidcount, i, alloc_count;
+ int retval;
+ pid_t pgid;
+ int dropref = flags & PGRP_DROPREF;
+#if 0
+ int serialize = flags & PGRP_BLOCKITERATE;
+#else
+ int serialize = 0;
+#endif
+
+ if (pgrp == 0)
+ return(0);
+ count = pgrp->pg_membercnt + 10;
+ if (count > hard_maxproc)
+ count = hard_maxproc;
+ alloc_count = count * sizeof(pid_t);
+ pid_list = (pid_t *)kalloc(alloc_count);
+ bzero(pid_list, alloc_count);
+
+ pgrp_lock(pgrp);
+ if (serialize != 0) {
+ while ((pgrp->pg_listflags & PGRP_FLAG_ITERABEGIN) == PGRP_FLAG_ITERABEGIN) {
+ pgrp->pg_listflags |= PGRP_FLAG_ITERWAIT;
+ msleep(&pgrp->pg_listflags, &pgrp->pg_mlock, 0, "pgrp_iterate", 0);
+ }
+ pgrp->pg_listflags |= PGRP_FLAG_ITERABEGIN;
+ }
+
+ pgid = pgrp->pg_id;
+
+ pidcount = 0;
+ for (p = pgrp->pg_members.lh_first; p != 0;
+ p = p->p_pglist.le_next) {
+ if ((filterfn == 0) || (filterfn(p, filterarg) != 0)) {
+ pid_list[pidcount] = p->p_pid;
+ pidcount++;
+ if (pidcount >= count)
+ break;
+ }
+ }
+
+
+ pgrp_unlock(pgrp);
+ if ((serialize == 0) && (dropref != 0))
+ pg_rele(pgrp);
+
+
+ for (i = 0; i < pidcount; i++) {
+ /* No handling for proc0 */
+ if (pid_list[i] == 0)
+ continue;
+ p = proc_find(pid_list[i]);
+ if (p) {
+ if (p->p_pgrpid != pgid) {
+ proc_rele(p);
+ continue;
+ }
+ proc_transwait(p, 0);
+ retval = callout(p, arg);
+
+ switch (retval) {
+ case PROC_RETURNED:
+ case PROC_RETURNED_DONE:
+ proc_rele(p);
+ if (retval == PROC_RETURNED_DONE) {
+ goto out;
+ }
+ break;
+
+ case PROC_CLAIMED_DONE:
+ goto out;
+ case PROC_CLAIMED:
+ default:
+ break;
+ }
+ }
+ }
+out:
+ if (serialize != 0) {
+ pgrp_lock(pgrp);
+ pgrp->pg_listflags &= ~PGRP_FLAG_ITERABEGIN;
+ if ((pgrp->pg_listflags & PGRP_FLAG_ITERWAIT) == PGRP_FLAG_ITERWAIT) {
+ pgrp->pg_listflags &= ~PGRP_FLAG_ITERWAIT;
+ wakeup(&pgrp->pg_listflags);
+ }
+ pgrp_unlock(pgrp);
+ if (dropref != 0)
+ pg_rele(pgrp);
+ }
+ kfree(pid_list, alloc_count);
+ return(0);
+}
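+
+/*
+ * Caller sketch (illustrative only): deliver a signal to every member
+ * of a process group, the way pgsignal()-style code drives this
+ * iterator. Each member is called out with a ref held after its
+ * membership has been re-checked against the pgid:
+ *
+ *   static int
+ *   sig_callout(proc_t p, void *arg)
+ *   {
+ *       psignal(p, *(int *)arg);
+ *       return (PROC_RETURNED);
+ *   }
+ *
+ *   int sig = SIGHUP;
+ *   pgrp_iterate(pgrp, PGRP_DROPREF, sig_callout, &sig, NULL, NULL);
+ */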
+
+static void
+pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
+{
+ proc_list_lock();
+ child->p_pgrp = pgrp;
+ child->p_pgrpid = pgrp->pg_id;
+ child->p_listflag |= P_LIST_INPGRP;
+ /*
+ * When the pgrp is being freed, a process can still request
+ * to be added via setpgid() (seen from bash when a login is
+ * terminated by the login cycler). Rather than failing with
+ * ESRCH, revive the pgrp by clearing the TERMINATE flag; it
+ * is safe to do so under the list lock because we hold a
+ * refcount on the pgrp.
+ */
+ if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
+ pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
+ }
+
+ if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
+ panic("pgrp_add : pgrp is dead adding process");
+ proc_list_unlock();
+
+ pgrp_lock(pgrp);
+ pgrp->pg_membercnt++;
+ if (parent != PROC_NULL) {
+ LIST_INSERT_AFTER(parent, child, p_pglist);
+ } else {
+ LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
+ }
+ pgrp_unlock(pgrp);
+
+ proc_list_lock();
+ if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
+ pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
+ }
+ proc_list_unlock();
+}
+
+static void
+pgrp_remove(struct proc * p)
+{
+ struct pgrp * pg;
+
+ pg = proc_pgrp(p);
+
+ proc_list_lock();
+#if __PROC_INTERNAL_DEBUG
+ if ((p->p_listflag & P_LIST_INPGRP) == 0)
+ panic("removing from pglist but no named ref\n");
+#endif
+ p->p_pgrpid = PGRPID_DEAD;
+ p->p_listflag &= ~P_LIST_INPGRP;
+ p->p_pgrp = NULL;
+ proc_list_unlock();
+
+ if (pg == PGRP_NULL)
+ panic("pgrp_remove: pg is NULL");
+ pgrp_lock(pg);
+ pg->pg_membercnt--;
+
+ if (pg->pg_membercnt < 0)
+ panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p);
+
+ LIST_REMOVE(p, p_pglist);
+ if (pg->pg_members.lh_first == 0) {
+ pgrp_unlock(pg);
+ pgdelete_dropref(pg);
+ } else {
+ pgrp_unlock(pg);
+ pg_rele(pg);
+ }
+}
+
+
+/* cannot use proc_pgrp as it may be stalled */
+static void
+pgrp_replace(struct proc * p, struct pgrp * newpg)
+{
+ struct pgrp * oldpg;
+
+ proc_list_lock();
+
+ while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
+ p->p_listflag |= P_LIST_PGRPTRWAIT;
+ (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
+ }
+
+ p->p_listflag |= P_LIST_PGRPTRANS;
+
+ oldpg = p->p_pgrp;
+ if (oldpg == PGRP_NULL)
+ panic("pgrp_replace: oldpg NULL");
+ oldpg->pg_refcount++;
+#if __PROC_INTERNAL_DEBUG
+ if ((p->p_listflag & P_LIST_INPGRP) == 0)
+ panic("removing from pglist but no named ref\n");
+#endif
+ p->p_pgrpid = PGRPID_DEAD;
+ p->p_listflag &= ~P_LIST_INPGRP;
+ p->p_pgrp = NULL;
+
+ proc_list_unlock();
+
+ pgrp_lock(oldpg);
+ oldpg->pg_membercnt--;
+ if (oldpg->pg_membercnt < 0)
+ panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p);
+ LIST_REMOVE(p, p_pglist);
+ if (oldpg->pg_members.lh_first == 0) {
+ pgrp_unlock(oldpg);
+ pgdelete_dropref(oldpg);
+ } else {
+ pgrp_unlock(oldpg);
+ pg_rele(oldpg);
+ }
+
+ proc_list_lock();
+ p->p_pgrp = newpg;
+ p->p_pgrpid = newpg->pg_id;
+ p->p_listflag |= P_LIST_INPGRP;
+ /*
+ * When the pgrp is being freed, a process can still request
+ * to be added via setpgid() (seen from bash when a login is
+ * terminated by the login cycler). Rather than failing with
+ * ESRCH, revive the pgrp by clearing the TERMINATE flag; it
+ * is safe to do so under the list lock because we hold a
+ * refcount on the pgrp.
+ */
+ if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
+ newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
+ }
+
+ if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
+ panic("pgrp_add : pgrp is dead adding process");
+ proc_list_unlock();
+
+ pgrp_lock(newpg);
+ newpg->pg_membercnt++;
+ LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
+ pgrp_unlock(newpg);
+
+ proc_list_lock();
+ if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
+ newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
+ }
+
+ p->p_listflag &= ~P_LIST_PGRPTRANS;
+ if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
+ p->p_listflag &= ~P_LIST_PGRPTRWAIT;
+ wakeup(&p->p_pgrpid);
+ }
+ proc_list_unlock();
+}
+
+void
+pgrp_lock(struct pgrp * pgrp)
+{
+ lck_mtx_lock(&pgrp->pg_mlock);
+}
+
+void
+pgrp_unlock(struct pgrp * pgrp)
+{
+ lck_mtx_unlock(&pgrp->pg_mlock);
+}
+
+void
+session_lock(struct session * sess)
+{
+ lck_mtx_lock(&sess->s_mlock);
+}
+
+
+void
+session_unlock(struct session * sess)
+{
+ lck_mtx_unlock(&sess->s_mlock);
+}
+
+struct pgrp *
+proc_pgrp(proc_t p)
+{
+ struct pgrp * pgrp;
+
+ if (p == PROC_NULL)
+ return(PGRP_NULL);
+ proc_list_lock();
+
+ while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
+ p->p_listflag |= P_LIST_PGRPTRWAIT;
+ (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
+ }
+
+ pgrp = p->p_pgrp;
+
+ assert(pgrp != NULL);
+
+ if (pgrp != PGRP_NULL) {
+ pgrp->pg_refcount++;
+ if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0)
+ panic("proc_pgrp: ref being povided for dead pgrp");
+ }
+
+ proc_list_unlock();
+
+ return(pgrp);
+}
+
+struct pgrp *
+tty_pgrp(struct tty * tp)
+{
+ struct pgrp * pg = PGRP_NULL;
+
+ proc_list_lock();
+ pg = tp->t_pgrp;
+
+ if (pg != PGRP_NULL) {
+ if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0)
+ panic("tty_pgrp: ref being povided for dead pgrp");
+ pg->pg_refcount++;
+ }
+ proc_list_unlock();
+
+ return(pg);
+}
+
+struct session *
+proc_session(proc_t p)
+{
+ struct session * sess = SESSION_NULL;
+
+ if (p == PROC_NULL)
+ return(SESSION_NULL);
+
+ proc_list_lock();
+
+ /* wait during transitions */
+ while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
+ p->p_listflag |= P_LIST_PGRPTRWAIT;
+ (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0);
+ }
+
+ if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) {
+ if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
+ panic("proc_session:returning sesssion ref on terminating session");
+ sess->s_count++;
+ }
+ proc_list_unlock();
+ return(sess);
+}
+
+void
+session_rele(struct session *sess)
+{
+ proc_list_lock();
+ if (--sess->s_count == 0) {
+ if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
+ panic("session_rele: terminating already terminated session");
+ sess->s_listflags |= S_LIST_TERM;
+ LIST_REMOVE(sess, s_hash);
+ sess->s_listflags |= S_LIST_DEAD;
+ if (sess->s_count != 0)
+ panic("session_rele: freeing session in use");
+ proc_list_unlock();
+#if CONFIG_FINE_LOCK_GROUPS
+ lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
+#else
+ lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
+#endif
+ FREE_ZONE(sess, sizeof(struct session), M_SESSION);
+ } else
+ proc_list_unlock();
+}
+
+int
+proc_transstart(proc_t p, int locked, int non_blocking)
+{
+ if (locked == 0)
+ proc_lock(p);
+ while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
+ if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
+ if (locked == 0)
+ proc_unlock(p);
+ return EDEADLK;
+ }
+ p->p_lflag |= P_LTRANSWAIT;
+ msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
+ }
+ p->p_lflag |= P_LINTRANSIT;
+ p->p_transholder = current_thread();
+ if (locked == 0)
+ proc_unlock(p);
+ return 0;
+}
+
+void
+proc_transcommit(proc_t p, int locked)
+{
+ if (locked == 0)
+ proc_lock(p);
+
+ assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
+ assert (p->p_transholder == current_thread());
+ p->p_lflag |= P_LTRANSCOMMIT;
+
+ if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
+ p->p_lflag &= ~P_LTRANSWAIT;
+ wakeup(&p->p_lflag);
+ }
+ if (locked == 0)
+ proc_unlock(p);
+}
+
+void
+proc_transend(proc_t p, int locked)
+{
+ if (locked == 0)
+ proc_lock(p);
+
+ p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
+ p->p_transholder = NULL;
+
+ if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
+ p->p_lflag &= ~P_LTRANSWAIT;
+ wakeup(&p->p_lflag);
+ }
+ if (locked == 0)
+ proc_unlock(p);
+}
+
+int
+proc_transwait(proc_t p, int locked)
+{
+ if (locked == 0)
+ proc_lock(p);
+ while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
+ if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
+ if (locked == 0)
+ proc_unlock(p);
+ return EDEADLK;
+ }
+ p->p_lflag |= P_LTRANSWAIT;
+ msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL);
+ }
+ if (locked == 0)
+ proc_unlock(p);
+ return 0;
+}
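+
+/*
+ * The transition primitives above form a small transaction protocol
+ * around sensitive proc state changes (e.g. exec). Intended call
+ * pattern (sketch):
+ *
+ *   if (proc_transstart(p, 0, 0) == 0) {   // mark p in transition
+ *       // ... mutate state; other threads proc_transwait() ...
+ *       proc_transcommit(p, 0);            // past point of no return
+ *       // ... finish ...
+ *       proc_transend(p, 0);               // clear and wake waiters
+ *   }
+ */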
+
+void
+proc_klist_lock(void)
+{
+ lck_mtx_lock(proc_klist_mlock);
+}
+
+void
+proc_klist_unlock(void)
+{
+ lck_mtx_unlock(proc_klist_mlock);
+}
+
+void
+proc_knote(struct proc * p, long hint)
+{
+ proc_klist_lock();
+ KNOTE(&p->p_klist, hint);
+ proc_klist_unlock();
+}
+
+void
+proc_knote_drain(struct proc *p)
+{
+ struct knote *kn = NULL;
+
+ /*
+ * Clear the proc's klist to avoid references after the proc is reaped.
+ */
+ proc_klist_lock();
+ while ((kn = SLIST_FIRST(&p->p_klist))) {
+ kn->kn_ptr.p_proc = PROC_NULL;
+ KNOTE_DETACH(&p->p_klist, kn);
+ }
+ proc_klist_unlock();
+}
+
+void
+proc_setregister(proc_t p)
+{
+ proc_lock(p);
+ p->p_lflag |= P_LREGISTER;
+ proc_unlock(p);
+}
+
+void
+proc_resetregister(proc_t p)
+{
+ proc_lock(p);
+ p->p_lflag &= ~P_LREGISTER;
+ proc_unlock(p);
+}
+
+pid_t
+proc_pgrpid(proc_t p)
+{
+ return p->p_pgrpid;
+}
+
+pid_t
+proc_selfpgrpid(void)
+{
+ return current_proc()->p_pgrpid;
+}
+
+
+/* return control and action states */
+int
+proc_getpcontrol(int pid, int * pcontrolp)
+{
+ proc_t p;
+
+ p = proc_find(pid);
+ if (p == PROC_NULL)
+ return(ESRCH);
+ if (pcontrolp != NULL)
+ *pcontrolp = p->p_pcaction;
+
+ proc_rele(p);
+ return(0);
+}
+
+int
+proc_dopcontrol(proc_t p)
+{
+ int pcontrol;
+
+ proc_lock(p);
+
+ pcontrol = PROC_CONTROL_STATE(p);
+
+ if (PROC_ACTION_STATE(p) == 0) {
+ switch (pcontrol) {
+ case P_PCTHROTTLE:
+ PROC_SETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm);
+ break;
+
+ case P_PCSUSP:
+ PROC_SETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm);
+ task_suspend(p->task);
+ break;
+
+ case P_PCKILL:
+ PROC_SETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
+ psignal(p, SIGKILL);
+ break;
+
+ default:
+ proc_unlock(p);
+ }
+
+ } else
+ proc_unlock(p);
+
+ return(PROC_RETURNED);
+}
+
+
+/*
+ * Resume a throttled or suspended process. This is an internal interface that's only
+ * used by the user level code that presents the GUI when we run out of swap space and
+ * hence is restricted to processes with superuser privileges.
+ */
+
+int
+proc_resetpcontrol(int pid)
+{
+ proc_t p;
+ int pcontrol;
+ int error;
+ proc_t self = current_proc();
+
+ /* if the process has been validated to handle resource control or root is valid one */
+ if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0)))
+ return error;
+
+ p = proc_find(pid);
+ if (p == PROC_NULL)
+ return(ESRCH);
+
+ proc_lock(p);
+
+ pcontrol = PROC_CONTROL_STATE(p);
+
+ if (PROC_ACTION_STATE(p) != 0) {
+ switch (pcontrol) {
+ case P_PCTHROTTLE:
+ PROC_RESETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm);
+ break;
+
+ case P_PCSUSP:
+ PROC_RESETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm);
+ task_resume(p->task);
+ break;
+
+ case P_PCKILL:
+ /* Huh? */
+ PROC_SETACTION_STATE(p);
+ proc_unlock(p);
+ printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm);
+ break;
+
+ default:
+ proc_unlock(p);
+ }
+
+ } else
+ proc_unlock(p);
+
+ proc_rele(p);
+ return(0);
+}
+
+
+
+struct no_paging_space
+{
+ uint64_t pcs_max_size;
+ uint64_t pcs_uniqueid;
+ int pcs_pid;
+ int pcs_proc_count;
+ uint64_t pcs_total_size;
+
+ uint64_t npcs_max_size;
+ uint64_t npcs_uniqueid;
+ int npcs_pid;
+ int npcs_proc_count;
+ uint64_t npcs_total_size;
+
+ int apcs_proc_count;
+ uint64_t apcs_total_size;
+};
+
+
+static int
+proc_pcontrol_filter(proc_t p, void *arg)
+{
+ struct no_paging_space *nps;
+ uint64_t compressed;
+
+ nps = (struct no_paging_space *)arg;
+
+ compressed = get_task_compressed(p->task);
+
+ if (PROC_CONTROL_STATE(p)) {
+ if (PROC_ACTION_STATE(p) == 0) {
+ if (compressed > nps->pcs_max_size) {
+ nps->pcs_pid = p->p_pid;
+ nps->pcs_uniqueid = p->p_uniqueid;
+ nps->pcs_max_size = compressed;
+ }
+ nps->pcs_total_size += compressed;
+ nps->pcs_proc_count++;
+ } else {
+ nps->apcs_total_size += compressed;
+ nps->apcs_proc_count++;
+ }
+ } else {
+ if (compressed > nps->npcs_max_size) {
+ nps->npcs_pid = p->p_pid;
+ nps->npcs_uniqueid = p->p_uniqueid;
+ nps->npcs_max_size = compressed;
+ }
+ nps->npcs_total_size += compressed;
+ nps->npcs_proc_count++;
+
+ }
+ return (0);
+}
+
+
+static int
+proc_pcontrol_null(__unused proc_t p, __unused void *arg)
+{
+ return(PROC_RETURNED);
+}
+
+
+/*
+ * Deal with the low on compressor pool space condition... this function
+ * gets called when we are approaching the limits of the compressor pool or
+ * we are unable to create a new swap file.
+ * Since this eventually creates a memory deadlock situation, we need to take action to free up
+ * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely.
+ * There are 2 categories of processes to deal with. Those that have an action
+ * associated with them by the task itself and those that do not. Actionable
+ * tasks can have one of three categories specified: ones that
+ * can be killed immediately, ones that should be suspended, and ones that should
+ * be throttled. Processes that do not have an action associated with them are normally
+ * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%)
+ * that only by killing them can we hope to put the system back into a usable state.
+ */
+
+#define NO_PAGING_SPACE_DEBUG 0
+
+extern uint64_t vm_compressor_pages_compressed(void);
+
+struct timeval last_no_space_action = {0, 0};
+
+int
+no_paging_space_action(void)
+{
+ proc_t p;
+ struct no_paging_space nps;
+ struct timeval now;
+
+ /*
+ * Throttle how often we come through here. Once every 5 seconds should be plenty.
+ */
+ microtime(&now);
+
+ if (now.tv_sec <= last_no_space_action.tv_sec + 5)
+ return (0);
+
+ /*
+ * Examine all processes and find the biggest (biggest is based on the number of pages this
+ * task has in the compressor pool) that has been marked to have some action
+ * taken when swap space runs out... we also find the biggest that hasn't been marked for
+ * action.
+ *
+ * If the biggest non-actionable task is over the "dangerously big" threshold (currently 50% of
+ * the total number of pages held by the compressor), we go ahead and kill it since no other task
+ * can have any real effect on the situation. Otherwise, we go after the actionable process.
+ */
+ bzero(&nps, sizeof(nps));
+
+ proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps);
+
+#if NO_PAGING_SPACE_DEBUG
+ printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n",
+ nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size);
+ printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n",
+ nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size);
+ printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n",
+ nps.apcs_proc_count, nps.apcs_total_size);
+#endif
+ if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) {
+ /*
+ * for now we'll knock out any task that has more than 50% of the pages
+ * held by the compressor
+ */
+ if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) {
+
+ if (nps.npcs_uniqueid == p->p_uniqueid) {
+ /*
+ * verify this is still the same process
+ * in case the proc exited and the pid got reused while
+ * we were finishing the proc_iterate and getting to this point
+ */
+ last_no_space_action = now;
+
+ printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm);
+ psignal(p, SIGKILL);
+
+ proc_rele(p);
+
+ return (0);
+ }
+
+ proc_rele(p);
+ }
+ }
+
+ if (nps.pcs_max_size > 0) {
+ if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) {
+
+ if (nps.pcs_uniqueid == p->p_uniqueid) {
+ /*
+ * verify this is still the same process
+ * in case the proc exited and the pid got reused while
+ * we were finishing the proc_iterate and getting to this point
+ */
+ last_no_space_action = now;
+
+ proc_dopcontrol(p);
+
+ proc_rele(p);
+
+ return (1);
+ }
+
+ proc_rele(p);
+ }
+ }
+ last_no_space_action = now;
+
+ printf("low swap: unable to find any eligible processes to take action on\n");
+
+ return (0);
+}
+
+int
+proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval)
+{
+ int ret = 0;
+ proc_t target_proc = PROC_NULL;
+ pid_t target_pid = uap->pid;
+ uint64_t target_uniqueid = uap->uniqueid;
+ task_t target_task = NULL;
+
+ if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) {
+ ret = EPERM;
+ goto out;
+ }
+ target_proc = proc_find(target_pid);
+ if (target_proc != PROC_NULL) {
+ if (target_uniqueid != proc_uniqueid(target_proc)) {
+ ret = ENOENT;
+ goto out;
+ }
+
+ target_task = proc_task(target_proc);
+ if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) {
+ ret = EINVAL;
+ goto out;
+ }
+ } else
+ ret = ENOENT;
+
+out:
+ if (target_proc != PROC_NULL)
+ proc_rele(target_proc);
+ return (ret);
+}
+
+#if VM_SCAN_FOR_SHADOW_CHAIN
+extern int vm_map_shadow_max(vm_map_t map);
+int proc_shadow_max(void);
+int proc_shadow_max(void)
+{
+ int retval, max;
+ proc_t p;
+ task_t task;
+ vm_map_t map;
+
+ max = 0;
+ proc_list_lock();
+ for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) {
+ if (p->p_stat == SIDL)
+ continue;
+ task = p->task;
+ if (task == NULL) {
+ continue;
+ }
+ map = get_task_map(task);
+ if (map == NULL) {
+ continue;
+ }
+ retval = vm_map_shadow_max(map);
+ if (retval > max) {
+ max = retval;
+ }
+ }
+ proc_list_unlock();
+ return max;