+
+/*
+ * Mark the exiting process as having drained all of its children and wake
+ * any threads waiting (in proc_parent/child-reaping paths) on that state.
+ * Caller is expected to hold the proc list lock.
+ */
+void
+proc_childdrainend(proc_t p)
+{
+#if __PROC_INTERNAL_DEBUG
+	/* By the time the drain ends no child may remain on p_children. */
+	if (p->p_childrencnt > 0) {
+		panic("exiting: children still hanging around\n");
+	}
+#endif
+	p->p_listflag |= P_LIST_CHILDDRAINED;
+	/* Wake waiters sleeping on &p->p_childrencnt, if any were parked. */
+	if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) {
+		p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT);
+		wakeup(&p->p_childrencnt);
+	}
+}
+
+/*
+ * Debug-only sanity checks run as a proc structure is being freed: the
+ * proc must be off the pid hash and hold no outstanding child, proc, or
+ * parent references. Compiled to a no-op unless __PROC_INTERNAL_DEBUG.
+ */
+void
+proc_checkdeadrefs(__unused proc_t p)
+{
+#if __PROC_INTERNAL_DEBUG
+	if ((p->p_listflag & P_LIST_INHASH) != 0) {
+		panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag);
+	}
+	if (p->p_childrencnt != 0) {
+		panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt);
+	}
+	if (p->p_refcount != 0) {
+		panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount);
+	}
+	if (p->p_parentref != 0) {
+		panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref);
+	}
+#endif
+}
+
+/* Return the BSD process ID of p, or -1 when p is NULL. */
+int
+proc_pid(proc_t p)
+{
+	if (p == NULL) {
+		return -1;
+	}
+	return p->p_pid;
+}
+
+/* Return the parent process ID of p, or -1 when p is NULL. */
+int
+proc_ppid(proc_t p)
+{
+	if (p == NULL) {
+		return -1;
+	}
+	return p->p_ppid;
+}
+
+/* Return the parent pid recorded at creation time, or -1 when p is NULL. */
+int
+proc_original_ppid(proc_t p)
+{
+	if (p == NULL) {
+		return -1;
+	}
+	return p->p_original_ppid;
+}
+
+/* pid of the calling process. */
+int
+proc_selfpid(void)
+{
+	proc_t self = current_proc();
+
+	return self->p_pid;
+}
+
+/* ppid of the calling process. */
+int
+proc_selfppid(void)
+{
+	proc_t self = current_proc();
+
+	return self->p_ppid;
+}
+
+/* Code-signing flags of the calling process, widened to 64 bits. */
+uint64_t
+proc_selfcsflags(void)
+{
+	proc_t self = current_proc();
+
+	return (uint64_t)self->p_csflags;
+}
+
+/*
+ * Copy p's code-signing flags into *flags.
+ * Returns 0 on success, EINVAL if either argument is NULL.
+ */
+int
+proc_csflags(proc_t p, uint64_t *flags)
+{
+	if (p == NULL || flags == NULL) {
+		return EINVAL;
+	}
+	*flags = (uint64_t)p->p_csflags;
+	return 0;
+}
+
+/* Platform identifier recorded for p, or (uint32_t)-1 when p is NULL. */
+uint32_t
+proc_platform(proc_t p)
+{
+	if (p == NULL) {
+		return (uint32_t)-1;
+	}
+	return p->p_platform;
+}
+
+/* SDK version recorded for p, or (uint32_t)-1 when p is NULL. */
+uint32_t
+proc_sdk(proc_t p)
+{
+	if (p == NULL) {
+		return (uint32_t)-1;
+	}
+	return p->p_sdk;
+}
+
+#if CONFIG_DTRACE
+/*
+ * Return the proc that DTrace should attribute the current thread to,
+ * accounting for the vfork window: while a thread has both UT_VFORK and
+ * UT_VFORKING set it is still executing on behalf of the parent task,
+ * so report the task's proc rather than current_proc().
+ */
+static proc_t
+dtrace_current_proc_vforking(void)
+{
+	thread_t th = current_thread();
+	struct uthread *ut = get_bsdthread_info(th);
+
+	if (ut &&
+	    ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) {
+		/*
+		 * Handle the narrow window where we're in the vfork syscall,
+		 * but we're not quite ready to claim (in particular, to DTrace)
+		 * that we're running as the child.
+		 */
+		return get_bsdtask_info(get_threadtask(th));
+	}
+	return current_proc();
+}
+
+/* pid for DTrace attribution (vfork-aware). */
+int
+dtrace_proc_selfpid(void)
+{
+	proc_t p = dtrace_current_proc_vforking();
+
+	return p->p_pid;
+}
+
+/* ppid for DTrace attribution (vfork-aware). */
+int
+dtrace_proc_selfppid(void)
+{
+	proc_t p = dtrace_current_proc_vforking();
+
+	return p->p_ppid;
+}
+
+/* real uid for DTrace attribution (vfork-aware). */
+uid_t
+dtrace_proc_selfruid(void)
+{
+	proc_t p = dtrace_current_proc_vforking();
+
+	return p->p_ruid;
+}
+#endif /* CONFIG_DTRACE */
+
+/*
+ * Return p's parent with a proc reference held (caller must proc_rele()),
+ * or PROC_NULL. If the parent is mid-exit (P_LIST_EXITED, not yet a
+ * zombie, children not yet drained) we cannot take a ref; park on
+ * &pp->p_childrencnt until proc_childdrainend() wakes us, then retry.
+ */
+proc_t
+proc_parent(proc_t p)
+{
+	proc_t parent;
+	proc_t pp;
+
+	proc_list_lock();
+loop:
+	pp = p->p_pptr;
+	parent = proc_ref_locked(pp);
+	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) {
+		pp->p_listflag |= P_LIST_CHILDLKWAIT;
+		/* msleep drops and re-takes proc_list_mlock; p_pptr may change. */
+		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0);
+		goto loop;
+	}
+	proc_list_unlock();
+	return parent;
+}
+
+/*
+ * TRUE when the calling process is p's parent. Checked under the proc
+ * list lock so the p_pptr read is stable.
+ */
+static boolean_t
+proc_parent_is_currentproc(proc_t p)
+{
+	boolean_t is_parent;
+
+	proc_list_lock();
+	is_parent = (p->p_pptr == current_proc()) ? TRUE : FALSE;
+	proc_list_unlock();
+
+	return is_parent;
+}
+
+/*
+ * Copy the short command name (p_comm) of process `pid` into buf.
+ * buf is always zeroed first; it stays all-zero if the pid is not found.
+ */
+void
+proc_name(int pid, char * buf, int size)
+{
+	proc_t p;
+
+	if (size <= 0) {
+		return;
+	}
+	bzero(buf, size);
+
+	p = proc_find(pid);
+	if (p != PROC_NULL) {
+		strlcpy(buf, &p->p_comm[0], size);
+		proc_rele(p);
+	}
+}
+
+/*
+ * Kernel-debugger-safe name copy: no locks, no proc refs. When the
+ * destination can hold more than p_comm, use the longer p_name;
+ * otherwise fall back to the short p_comm so the copy still fits.
+ */
+void
+proc_name_kdp(task_t t, char * buf, int size)
+{
+	proc_t p = get_bsdtask_info(t);
+	if (p == PROC_NULL) {
+		return;
+	}
+
+	if ((size_t)size > sizeof(p->p_comm)) {
+		strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size));
+	} else {
+		strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size));
+	}
+}
+
+/*
+ * Fetch the executable UUID for the task's BSD proc (debugger context).
+ * Returns TRUE on success, FALSE if the task has no proc.
+ */
+boolean_t
+proc_binary_uuid_kdp(task_t task, uuid_t uuid)
+{
+	proc_t p = get_bsdtask_info(task);
+
+	if (p != PROC_NULL) {
+		proc_getexecutableuuid(p, uuid, sizeof(uuid_t));
+		return TRUE;
+	}
+	return FALSE;
+}
+
+/*
+ * Copy a uthread's name into buf for stackshot. Returns 0 on success,
+ * -1 when the destination is smaller than MAXTHREADNAMESIZE.
+ */
+int
+proc_threadname_kdp(void * uth, char * buf, size_t size)
+{
+	if (size < MAXTHREADNAMESIZE) {
+		/* this is really just a protective measure for the future in
+		 * case the thread name size in stackshot gets out of sync with
+		 * the BSD max thread name size. Note that bsd_getthreadname
+		 * doesn't take input buffer size into account. */
+		return -1;
+	}
+
+	if (uth != NULL) {
+		bsd_getthreadname(uth, buf);
+	}
+	return 0;
+}
+
+
+/* note that this function is generally going to be called from stackshot,
+ * and the arguments will be coming from a struct which is declared packed
+ * thus the input arguments will in general be unaligned. We have to handle
+ * that here. */
+/* note that this function is generally going to be called from stackshot,
+ * and the arguments will be coming from a struct which is declared packed
+ * thus the input arguments will in general be unaligned. We have to handle
+ * that here. */
+void
+proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime)
+{
+	proc_t pp = (proc_t)p;
+	/* Each out-parameter is optional; only the non-NULL ones are filled. */
+	if (pp != PROC_NULL) {
+		if (tv_sec != NULL) {
+			*tv_sec = pp->p_start.tv_sec;
+		}
+		if (tv_usec != NULL) {
+			*tv_usec = pp->p_start.tv_usec;
+		}
+		if (abstime != NULL) {
+			/* p_stats may already be gone for an exiting proc. */
+			if (pp->p_stats != NULL) {
+				*abstime = pp->p_stats->ps_start;
+			} else {
+				*abstime = 0;
+			}
+		}
+	}
+}
+
+/* Address of the proc's short command name buffer (no copy, no ref). */
+char *
+proc_name_address(void *p)
+{
+	proc_t proc = (proc_t)p;
+
+	return &proc->p_comm[0];
+}
+
+/* Prefer the long name (p_name) when set; otherwise the short p_comm. */
+char *
+proc_best_name(proc_t p)
+{
+	if (p->p_name[0] == '\0') {
+		return &p->p_comm[0];
+	}
+	return &p->p_name[0];
+}
+
+/*
+ * Copy the calling process's short command name into buf.
+ * NOTE(review): unlike proc_name(), buf is NOT zeroed first and is left
+ * untouched if current_proc() is NULL — callers appear to rely on
+ * strlcpy's termination; confirm no caller passes size <= 0.
+ */
+void
+proc_selfname(char * buf, int size)
+{
+	proc_t p;
+
+	if ((p = current_proc()) != (proc_t)0) {
+		strlcpy(buf, &p->p_comm[0], size);
+	}
+}
+
+/* Deliver signum to the process identified by pid, if it exists. */
+void
+proc_signal(int pid, int signum)
+{
+	proc_t target = proc_find(pid);
+
+	if (target == PROC_NULL) {
+		return;
+	}
+	psignal(target, signum);
+	proc_rele(target);
+}
+
+/*
+ * Report pending signals (within mask) for the process identified by pid.
+ * Returns 0 when the pid does not exist.
+ */
+int
+proc_issignal(int pid, sigset_t mask)
+{
+	proc_t target = proc_find(pid);
+	int pending = 0;
+
+	if (target != PROC_NULL) {
+		pending = proc_pendingsignals(target, mask);
+		proc_rele(target);
+	}
+
+	return pending;
+}
+
+/* 1 if P_NOREMOTEHANG is set on p, else 0 (0 for NULL p). */
+int
+proc_noremotehang(proc_t p)
+{
+	if (p == NULL) {
+		return 0;
+	}
+	return (p->p_flag & P_NOREMOTEHANG) ? 1 : 0;
+}
+
+/* 1 if p has begun exiting (P_LEXIT), else 0 (0 for NULL p). */
+int
+proc_exiting(proc_t p)
+{
+	if (p == NULL) {
+		return 0;
+	}
+	return (p->p_lflag & P_LEXIT) ? 1 : 0;
+}
+
+/* 1 if p is in process teardown (P_LPEXIT), else 0 (0 for NULL p). */
+int
+proc_in_teardown(proc_t p)
+{
+	if (p == NULL) {
+		return 0;
+	}
+	return (p->p_lflag & P_LPEXIT) ? 1 : 0;
+}
+
+/* 1 if P_FORCEQUOTA is set on p, else 0 (0 for NULL p). */
+int
+proc_forcequota(proc_t p)
+{
+	if (p == NULL) {
+		return 0;
+	}
+	return (p->p_flag & P_FORCEQUOTA) ? 1 : 0;
+}
+
+/*
+ * Superuser check against p's credential. Takes its own reference on the
+ * cred for the duration of the check and drops it before returning.
+ * Returns 0 if privileged, otherwise an error from suser().
+ */
+int
+proc_suser(proc_t p)
+{
+	kauth_cred_t my_cred;
+	int error;
+
+	my_cred = kauth_cred_proc_ref(p);
+	error = suser(my_cred, &p->p_acflag);
+	kauth_cred_unref(&my_cred);
+	return error;
+}
+
+/* The Mach task backing this proc; no reference is taken. */
+task_t
+proc_task(proc_t proc)
+{
+	return (task_t)proc->task;
+}
+
+/*
+ * Obtain the first thread in a process
+ *
+ * XXX This is a bad thing to do; it exists predominantly to support the
+ * XXX use of proc_t's in places that should really be using
+ * XXX thread_t's instead. This maintains historical behaviour, but really
+ * XXX needs an audit of the context (proxy vs. not) to clean up.
+ */
+thread_t
+proc_thread(proc_t proc)
+{
+	uthread_t first = TAILQ_FIRST(&proc->p_uthlist);
+
+	/* No threads (e.g. during teardown) yields NULL. */
+	return (first != NULL) ? first->uu_context.vc_thread : NULL;
+}
+
+/* Raw credential pointer for p; no reference is taken here. */
+kauth_cred_t
+proc_ucred(proc_t p)
+{
+	return p->p_ucred;
+}
+
+/* The BSD uthread attached to the calling Mach thread. */
+struct uthread *
+current_uthread()
+{
+	return (struct uthread *)get_bsdthread_info(current_thread());
+}
+
+
+/* Nonzero when p runs with a 64-bit address space. */
+int
+proc_is64bit(proc_t p)
+{
+	int is64 = IS_64BIT_PROCESS(p);
+
+	return is64;
+}
+
+/* Nonzero when p uses the 64-bit data model (tracked by the task layer). */
+int
+proc_is64bit_data(proc_t p)
+{
+	assert(p->task);
+	return (int)task_get_64bit_data(p->task);
+}
+
+/* pid reuse generation counter for p. */
+int
+proc_pidversion(proc_t p)
+{
+	return p->p_idversion;
+}
+
+/* Persona identifier associated with p. */
+uint32_t
+proc_persona_id(proc_t p)
+{
+	return (uint32_t)persona_id_from_proc(p);
+}
+
+/* Cached uid for p. */
+uint32_t
+proc_getuid(proc_t p)
+{
+	return p->p_uid;
+}
+
+/* Cached gid for p. */
+uint32_t
+proc_getgid(proc_t p)
+{
+	return p->p_gid;
+}
+
+/* Unique (never-reused) 64-bit identifier for p. */
+uint64_t
+proc_uniqueid(proc_t p)
+{
+	return p->p_uniqueid;
+}
+
+/* Unique identifier of p's parent, recorded at creation. */
+uint64_t
+proc_puniqueid(proc_t p)
+{
+	return p->p_puniqueid;
+}
+
+/*
+ * Fill ids with p's coalition identifiers, one per coalition type.
+ * Zeroed when coalitions are compiled out.
+ */
+void
+proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
+{
+#if CONFIG_COALITIONS
+	task_coalition_ids(p->task, ids);
+#else
+	memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES]));
+#endif
+}
+
+/* Accumulated was-throttled counter for p. */
+uint64_t
+proc_was_throttled(proc_t p)
+{
+	return p->was_throttled;
+}
+
+/* Accumulated did-throttle counter for p. */
+uint64_t
+proc_did_throttle(proc_t p)
+{
+	return p->did_throttle;
+}
+
+/* Code-directory hash, obtained from the backing text vnode. */
+int
+proc_getcdhash(proc_t p, unsigned char *cdhash)
+{
+	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
+}
+
+/* Low 16 bits of the wait(2)-style exit status. */
+int
+proc_exitstatus(proc_t p)
+{
+	return (int)(p->p_xstat & 0xffff);
+}
+
+/*
+ * Copy p's executable UUID into uuidbuf. Silently a no-op when the
+ * destination is smaller than the UUID — callers must pass at least
+ * sizeof(uuid_t).
+ */
+void
+proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size)
+{
+	if (size >= sizeof(p->p_uuid)) {
+		memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid));
+	}
+}
+
+/* Return vnode for executable with an iocount. Must be released with vnode_put() */
+vnode_t
+proc_getexecutablevnode(proc_t p)
+{
+	vnode_t tvp = p->p_textvp;
+
+	if (tvp == NULLVP) {
+		return NULLVP;
+	}
+	/* Take an iocount so the caller can use the vnode safely. */
+	if (vnode_getwithref(tvp) != 0) {
+		return NULLVP;
+	}
+	return tvp;
+}
+
+/*
+ * Return (in *vp, with an iocount) the controlling terminal vnode of p's
+ * session. The vnode pointer and vid are snapshotted under the session
+ * lock, then revalidated by vnode_getwithvid() so a recycled vnode is
+ * rejected. Returns 0 on success, ENOENT when the session has no tty,
+ * EINVAL on bad arguments / no session / stale vnode.
+ */
+int
+proc_gettty(proc_t p, vnode_t *vp)
+{
+	if (!p || !vp) {
+		return EINVAL;
+	}
+
+	struct session *procsp = proc_session(p);
+	int err = EINVAL;
+
+	if (procsp != SESSION_NULL) {
+		session_lock(procsp);
+		vnode_t ttyvp = procsp->s_ttyvp;
+		int ttyvid = procsp->s_ttyvid;
+		session_unlock(procsp);
+
+		if (ttyvp) {
+			/* vid check fails if the vnode was recycled meanwhile. */
+			if (vnode_getwithvid(ttyvp, ttyvid) == 0) {
+				*vp = ttyvp;
+				err = 0;
+			}
+		} else {
+			err = ENOENT;
+		}
+
+		session_rele(procsp);
+	}
+
+	return err;
+}
+
+/*
+ * Return (in *dev) the device number of p's controlling terminal.
+ * Reads the session's tty under the session lock. Returns 0 on success,
+ * EINVAL when there is no session or no tty; *dev is untouched on failure.
+ */
+int
+proc_gettty_dev(proc_t p, dev_t *dev)
+{
+	struct session *procsp = proc_session(p);
+	boolean_t has_tty = FALSE;
+
+	if (procsp != SESSION_NULL) {
+		session_lock(procsp);
+
+		struct tty * tp = SESSION_TP(procsp);
+		if (tp != TTY_NULL) {
+			*dev = tp->t_dev;
+			has_tty = TRUE;
+		}
+
+		session_unlock(procsp);
+		session_rele(procsp);
+	}
+
+	if (has_tty) {
+		return 0;
+	} else {
+		return EINVAL;
+	}
+}
+
+/*
+ * Copy the calling process's argc + argv block into buf.
+ * Call with buf == NULL to query the required size via *buflen.
+ * With a buffer: the first sizeof(p_argc) bytes receive argc, the rest
+ * receives up to p_argslen bytes copied in from the top of the user
+ * stack. Returns 0 on success or an errno (EINVAL, or copyin's error).
+ */
+int
+proc_selfexecutableargs(uint8_t *buf, size_t *buflen)
+{
+	proc_t p = current_proc();
+
+	// buflen must always be provided
+	if (buflen == NULL) {
+		return EINVAL;
+	}
+
+	// If a buf is provided, there must be at least enough room to fit argc
+	if (buf && *buflen < sizeof(p->p_argc)) {
+		return EINVAL;
+	}
+
+	if (!p->user_stack) {
+		return EINVAL;
+	}
+
+	if (buf == NULL) {
+		// Size-query mode: report the space needed for argc + args.
+		*buflen = p->p_argslen + sizeof(p->p_argc);
+		return 0;
+	}
+
+	// Copy in argc to the first 4 bytes
+	memcpy(buf, &p->p_argc, sizeof(p->p_argc));
+
+	if (*buflen > sizeof(p->p_argc) && p->p_argslen > 0) {
+		// See memory layout comment in kern_exec.c:exec_copyout_strings()
+		// We want to copy starting from `p_argslen` bytes away from top of stack
+		return copyin(p->user_stack - p->p_argslen,
+		    buf + sizeof(p->p_argc),
+		    MIN(p->p_argslen, *buflen - sizeof(p->p_argc)));
+	} else {
+		return 0;
+	}
+}
+
+/* File offset of the executable within its backing vnode. */
+off_t
+proc_getexecutableoffset(proc_t p)
+{
+	return p->p_textoff;
+}
+
+/* Atomically mark the task's BSD proc as dependency-capable. */
+void
+bsd_set_dependency_capable(task_t task)
+{
+	proc_t p = get_bsdtask_info(task);
+
+	if (p != NULL) {
+		OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag);
+	}
+}
+
+
+#ifndef __arm__
+/* Nonzero when p exists and carries the P_LP64 flag. */
+int
+IS_64BIT_PROCESS(proc_t p)
+{
+	return (p != NULL && (p->p_flag & P_LP64) != 0) ? 1 : 0;
+}
+#endif
+
+/*
+ * Locate a process by number
+ */
+/*
+ * Look up a proc by pid in the pid hash. Caller must hold the proc list
+ * lock; no reference is taken on the result. pid 0 maps to kernproc.
+ * Returns NULL when no live proc has this pid.
+ */
+proc_t
+pfind_locked(pid_t pid)
+{
+	proc_t p;
+#if DEBUG
+	proc_t q;
+#endif
+
+	if (!pid) {
+		return kernproc;
+	}
+
+	for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) {
+		if (p->p_pid == pid) {
+#if DEBUG
+			/* Paranoia: a pid must appear at most once in the hash. */
+			for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) {
+				if ((p != q) && (q->p_pid == pid)) {
+					panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid);
+				}
+			}
+#endif
+			return p;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Locate a zombie by PID
+ */
+/*
+ * Look up a zombie proc by pid on the zombie list. Takes and drops the
+ * proc list lock internally; NOTE that no reference is returned with the
+ * result, so the pointer is only a hint once the lock is dropped.
+ */
+__private_extern__ proc_t
+pzfind(pid_t pid)
+{
+	proc_t p;
+
+
+	proc_list_lock();
+
+	for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) {
+		if (p->p_pid == pid) {
+			break;
+		}
+	}
+
+	proc_list_unlock();
+
+	return p;
+}
+
+/*
+ * Locate a process group by number
+ */
+
+/*
+ * Look up a process group by id and return it with a reference held
+ * (caller must pg_rele()). Groups marked PGRP_FLAG_TERMINATE are treated
+ * as gone and PGRP_NULL is returned.
+ */
+struct pgrp *
+pgfind(pid_t pgid)
+{
+	struct pgrp * pgrp;
+
+	proc_list_lock();
+	pgrp = pgfind_internal(pgid);
+	if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) {
+		pgrp = PGRP_NULL;
+	} else {
+		/* Reference taken under the list lock before it is dropped. */
+		pgrp->pg_refcount++;
+	}
+	proc_list_unlock();
+	return pgrp;
+}
+
+
+
+/*
+ * Hash lookup of a process group by id. Caller must hold the proc list
+ * lock; no reference is taken. Returns NULL when not found.
+ */
+struct pgrp *
+pgfind_internal(pid_t pgid)
+{
+	struct pgrp *pgrp;
+
+	for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) {
+		if (pgrp->pg_id == pgid) {
+			return pgrp;
+		}
+	}
+	return NULL;
+}
+
+/* Drop one reference on pgrp; tolerates PGRP_NULL. */
+void
+pg_rele(struct pgrp * pgrp)
+{
+	if (pgrp != PGRP_NULL) {
+		pg_rele_dropref(pgrp);
+	}
+}
+
+/*
+ * Drop one reference on pgrp. If this is the last reference and the
+ * group has been marked for termination, hand the final reference to
+ * pgdelete_dropref() (called without the list lock), which tears the
+ * group down.
+ */
+void
+pg_rele_dropref(struct pgrp * pgrp)
+{
+	proc_list_lock();
+	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
+		proc_list_unlock();
+		pgdelete_dropref(pgrp);
+		return;
+	}
+
+	pgrp->pg_refcount--;
+	proc_list_unlock();
+}
+
+/*
+ * Hash lookup of a session by session id. Caller must hold the proc list
+ * lock; no reference is taken. Returns NULL when not found.
+ */
+struct session *
+session_find_internal(pid_t sessid)
+{
+	struct session *sess;
+
+	for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) {
+		if (sess->s_sid == sessid) {
+			return sess;
+		}
+	}
+	return NULL;
+}
+
+
+/*
+ * Make a new process ready to become a useful member of society by making it
+ * visible in all the right places and initialize its own lists to empty.
+ *
+ * Parameters: parent The parent of the process to insert
+ * child The child process to insert
+ *
+ * Returns: (void)
+ *
+ * Notes: Insert a child process into the parents process group, assign
+ * the child the parent process pointer and PPID of the parent,
+ * place it on the parents p_children list as a sibling,
+ * initialize its own child list, place it in the allproc list,
+ * insert it in the proper hash bucket, and initialize its
+ * event list.
+ */
+void
+pinsertchild(proc_t parent, proc_t child)
+{
+	struct pgrp * pg;
+
+	/* Fresh child: empty child list and event list. */
+	LIST_INIT(&child->p_children);
+	TAILQ_INIT(&child->p_evlist);
+	child->p_pptr = parent;
+	child->p_ppid = parent->p_pid;
+	child->p_original_ppid = parent->p_pid;
+	child->p_puniqueid = parent->p_uniqueid;
+	child->p_xhighbits = 0;
+
+	/* Join the parent's process group (takes/drops a pgrp ref). */
+	pg = proc_pgrp(parent);
+	pgrp_add(pg, parent, child);
+	pg_rele(pg);
+
+	proc_list_lock();
+
+#if CONFIG_MEMORYSTATUS
+	/* Register with memorystatus while holding the list lock. */
+	memorystatus_add(child, TRUE);
+#endif
+
+	parent->p_childrencnt++;
+	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
+
+	LIST_INSERT_HEAD(&allproc, child, p_list);
+	/* mark the completion of proc creation */
+	child->p_listflag &= ~P_LIST_INCREATE;
+
+	proc_list_unlock();
+}
+
+/*
+ * Move p to a new or existing process group (and session)
+ *
+ * Returns: 0 Success
+ * ESRCH No such process
+ */
+int
+enterpgrp(proc_t p, pid_t pgid, int mksess)
+{
+	struct pgrp *pgrp;
+	struct pgrp *mypgrp;
+	struct session * procsp;
+
+	/* References: pgrp from pgfind, mypgrp/procsp from p; all must be
+	 * released on every exit path below. */
+	pgrp = pgfind(pgid);
+	mypgrp = proc_pgrp(p);
+	procsp = proc_session(p);
+
+#if DIAGNOSTIC
+	if (pgrp != NULL && mksess) {   /* firewalls */
+		panic("enterpgrp: setsid into non-empty pgrp");
+	}
+	if (SESS_LEADER(p, procsp)) {
+		panic("enterpgrp: session leader attempted setpgrp");
+	}
+#endif
+	if (pgrp == PGRP_NULL) {
+		pid_t savepid = p->p_pid;
+		proc_t np = PROC_NULL;
+		/*
+		 * new process group
+		 */
+#if DIAGNOSTIC
+		if (p->p_pid != pgid) {
+			panic("enterpgrp: new pgrp and pid != pgid");
+		}
+#endif
+		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
+		    M_WAITOK);
+		if (pgrp == NULL) {
+			panic("enterpgrp: M_PGRP zone depleted");
+		}
+		/* Re-validate that p is still alive and is the proc we were
+		 * called with; if not, back out everything and fail. */
+		if ((np = proc_find(savepid)) == NULL || np != p) {
+			if (np != PROC_NULL) {
+				proc_rele(np);
+			}
+			if (mypgrp != PGRP_NULL) {
+				pg_rele(mypgrp);
+			}
+			if (procsp != SESSION_NULL) {
+				session_rele(procsp);
+			}
+			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
+			return ESRCH;
+		}
+		proc_rele(np);
+		if (mksess) {
+			struct session *sess;
+
+			/*
+			 * new session
+			 */
+			MALLOC_ZONE(sess, struct session *,
+			    sizeof(struct session), M_SESSION, M_WAITOK);
+			if (sess == NULL) {
+				panic("enterpgrp: M_SESSION zone depleted");
+			}
+			sess->s_leader = p;
+			sess->s_sid = p->p_pid;
+			sess->s_count = 1;
+			sess->s_ttyvp = NULL;
+			sess->s_ttyp = TTY_NULL;
+			sess->s_flags = 0;
+			sess->s_listflags = 0;
+			sess->s_ttypgrpid = NO_PID;
+
+			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
+
+			/* Inherit the login name from the old session. */
+			bcopy(procsp->s_login, sess->s_login,
+			    sizeof(sess->s_login));
+			/* A new session leader has no controlling terminal. */
+			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
+			proc_list_lock();
+			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
+			proc_list_unlock();
+			pgrp->pg_session = sess;
+#if DIAGNOSTIC
+			if (p != current_proc()) {
+				panic("enterpgrp: mksession and p != curproc");
+			}
+#endif
+		} else {
+			/* New pgrp joins p's existing session; take a count. */
+			proc_list_lock();
+			pgrp->pg_session = procsp;
+
+			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
+				panic("enterpgrp: providing ref to terminating session ");
+			}
+			pgrp->pg_session->s_count++;
+			proc_list_unlock();
+		}
+		pgrp->pg_id = pgid;
+
+		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
+
+		LIST_INIT(&pgrp->pg_members);
+		pgrp->pg_membercnt = 0;
+		pgrp->pg_jobc = 0;
+		proc_list_lock();
+		pgrp->pg_refcount = 1;
+		pgrp->pg_listflags = 0;
+		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
+		proc_list_unlock();
+	} else if (pgrp == mypgrp) {
+		/* Already a member: drop both references and succeed. */
+		pg_rele(pgrp);
+		if (mypgrp != NULL) {
+			pg_rele(mypgrp);
+		}
+		if (procsp != SESSION_NULL) {
+			session_rele(procsp);
+		}
+		return 0;
+	}
+
+	if (procsp != SESSION_NULL) {
+		session_rele(procsp);
+	}
+	/*
+	 * Adjust eligibility of affected pgrps to participate in job control.
+	 * Increment eligibility counts before decrementing, otherwise we
+	 * could reach 0 spuriously during the first call.
+	 */
+	fixjobc(p, pgrp, 1);
+	fixjobc(p, mypgrp, 0);
+
+	if (mypgrp != PGRP_NULL) {
+		pg_rele(mypgrp);
+	}
+	/* Swap p onto the new group's member list. */
+	pgrp_replace(p, pgrp);
+	pg_rele(pgrp);
+
+	return 0;
+}
+
+/*
+ * remove process from process group
+ */
+int
+leavepgrp(proc_t p)
+{
+ pgrp_remove(p);
+ return 0;
+}
+
+/*
+ * delete a process group
+ */
+/*
+ * Consume the caller's (final) reference on pgrp and, if the group is
+ * empty and unreferenced, unhash and free it, dropping the group's
+ * session count (freeing the session too when it reaches zero).
+ * Called without the proc list lock held.
+ */
+static void
+pgdelete_dropref(struct pgrp *pgrp)
+{
+	struct tty *ttyp;
+	int emptypgrp  = 1;
+	struct session *sessp;
+
+
+	/* Snapshot emptiness under the pgrp lock. */
+	pgrp_lock(pgrp);
+	if (pgrp->pg_membercnt != 0) {
+		emptypgrp = 0;
+	}
+	pgrp_unlock(pgrp);
+
+	proc_list_lock();
+	pgrp->pg_refcount--;
+	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
+		proc_list_unlock();
+		return;
+	}
+
+	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;
+
+	/* Someone re-referenced it; the last pg_rele will come back here. */
+	if (pgrp->pg_refcount > 0) {
+		proc_list_unlock();
+		return;
+	}
+
+	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
+	LIST_REMOVE(pgrp, pg_hash);
+
+	proc_list_unlock();
+
+	/* Detach the group from its terminal, if it is the foreground pgrp. */
+	ttyp = SESSION_TP(pgrp->pg_session);
+	if (ttyp != TTY_NULL) {
+		if (ttyp->t_pgrp == pgrp) {
+			tty_lock(ttyp);
+			/* Re-check after acquiring the lock */
+			if (ttyp->t_pgrp == pgrp) {
+				ttyp->t_pgrp = NULL;
+				pgrp->pg_session->s_ttypgrpid = NO_PID;
+			}
+			tty_unlock(ttyp);
+		}
+	}
+
+	proc_list_lock();
+
+	sessp = pgrp->pg_session;
+	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
+		panic("pg_deleteref: manipulating refs of already terminating session");
+	}
+	if (--sessp->s_count == 0) {
+		/* Last session count: tear the session down as well. */
+		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) {
+			panic("pg_deleteref: terminating already terminated session");
+		}
+		sessp->s_listflags |= S_LIST_TERM;
+		ttyp = SESSION_TP(sessp);
+		LIST_REMOVE(sessp, s_hash);
+		proc_list_unlock();
+		if (ttyp != TTY_NULL) {
+			tty_lock(ttyp);
+			if (ttyp->t_session == sessp) {
+				ttyp->t_session = NULL;
+			}
+			tty_unlock(ttyp);
+		}
+		proc_list_lock();
+		sessp->s_listflags |= S_LIST_DEAD;
+		if (sessp->s_count != 0) {
+			panic("pg_deleteref: freeing session in use");
+		}
+		proc_list_unlock();
+		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
+
+		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
+	} else {
+		proc_list_unlock();
+	}
+	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
+	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
+}
+
+
+/*
+ * Adjust pgrp jobc counters when specified process changes process group.
+ * We count the number of processes in each process group that "qualify"
+ * the group for terminal job control (those with a parent in a different
+ * process group of the same session). If that count reaches zero, the
+ * process group becomes orphaned. Check both the specified process'
+ * process group and that of its children.
+ * entering == 0 => p is leaving specified group.
+ * entering == 1 => p is entering specified group.
+ */
+/*
+ * Per-child callback for fixjobc(): adjust the child's process group's
+ * job-control count (pg_jobc) when the child's group differs from the
+ * moving proc's group but shares its session. Dropping pg_jobc to zero
+ * orphans the group (orphanpg is called after unlocking it).
+ */
+int
+fixjob_callback(proc_t p, void * arg)
+{
+	struct fixjob_iterargs *fp;
+	struct pgrp * pg, *hispg;
+	struct session * mysession, *hissess;
+	int entering;
+
+	fp = (struct fixjob_iterargs *)arg;
+	pg = fp->pg;
+	mysession = fp->mysession;
+	entering = fp->entering;
+
+	/* Both carry references that must be released below. */
+	hispg = proc_pgrp(p);
+	hissess = proc_session(p);
+
+	if ((hispg != pg) &&
+	    (hissess == mysession)) {
+		pgrp_lock(hispg);
+		if (entering) {
+			hispg->pg_jobc++;
+			pgrp_unlock(hispg);
+		} else if (--hispg->pg_jobc == 0) {
+			pgrp_unlock(hispg);
+			orphanpg(hispg);
+		} else {
+			pgrp_unlock(hispg);
+		}
+	}
+	if (hissess != SESSION_NULL) {
+		session_rele(hissess);
+	}
+	if (hispg != PGRP_NULL) {
+		pg_rele(hispg);
+	}
+
+	return PROC_RETURNED;
+}
+
+/*
+ * Recompute job-control eligibility (pg_jobc) for pgrp and for the
+ * process groups of p's children, as p enters (entering == 1) or leaves
+ * (entering == 0) pgrp. A group whose count drops to zero is orphaned.
+ */
+void
+fixjobc(proc_t p, struct pgrp *pgrp, int entering)
+{
+	struct pgrp *hispgrp = PGRP_NULL;
+	struct session *hissess = SESSION_NULL;
+	struct session *mysession = pgrp->pg_session;
+	proc_t parent;
+	struct fixjob_iterargs fjarg;
+	boolean_t proc_parent_self;
+
+	/*
+	 * Check if p's parent is current proc, if yes then no need to take
+	 * a ref; calling proc_parent with current proc as parent may
+	 * deadlock if current proc is exiting.
+	 */
+	proc_parent_self = proc_parent_is_currentproc(p);
+	if (proc_parent_self) {
+		parent = current_proc();
+	} else {
+		parent = proc_parent(p);
+	}
+
+	if (parent != PROC_NULL) {
+		hispgrp = proc_pgrp(parent);
+		hissess = proc_session(parent);
+		/* Only drop the ref proc_parent() took; none taken for self. */
+		if (!proc_parent_self) {
+			proc_rele(parent);
+		}
+	}
+
+
+	/*
+	 * Check p's parent to see whether p qualifies its own process
+	 * group; if so, adjust count for p's process group.
+	 */
+	if ((hispgrp != pgrp) &&
+	    (hissess == mysession)) {
+		pgrp_lock(pgrp);
+		if (entering) {
+			pgrp->pg_jobc++;
+			pgrp_unlock(pgrp);
+		} else if (--pgrp->pg_jobc == 0) {
+			/* orphanpg() must be called without the pgrp lock. */
+			pgrp_unlock(pgrp);
+			orphanpg(pgrp);
+		} else {
+			pgrp_unlock(pgrp);
+		}
+	}
+
+	if (hissess != SESSION_NULL) {
+		session_rele(hissess);
+	}
+	if (hispgrp != PGRP_NULL) {
+		pg_rele(hispgrp);
+	}
+
+	/*
+	 * Check this process' children to see whether they qualify
+	 * their process groups; if so, adjust counts for children's
+	 * process groups.
+	 */
+	fjarg.pg = pgrp;
+	fjarg.mysession = mysession;
+	fjarg.entering = entering;
+	proc_childrenwalk(p, fixjob_callback, &fjarg);
+}
+
+/*
+ * The pidlist_* routines support the functions in this file that
+ * walk lists of processes applying filters and callouts to the
+ * elements of the list.
+ *
+ * A prior implementation used a single linear array, which can be
+ * tricky to allocate on large systems. This implementation creates
+ * an SLIST of modestly sized arrays of PIDS_PER_ENTRY elements.
+ *
+ * The array should be sized large enough to keep the overhead of
+ * walking the list low, but small enough that blocking allocations of
+ * pidlist_entry_t structures always succeed.
+ */
+
+#define PIDS_PER_ENTRY 1021
+
+/* One chunk of the pid list: a fixed array plus a link to the next chunk. */
+typedef struct pidlist_entry {
+	SLIST_ENTRY(pidlist_entry) pe_link;
+	u_int pe_nused;                 /* slots filled in pe_pid */
+	pid_t pe_pid[PIDS_PER_ENTRY];
+} pidlist_entry_t;
+
+/* Growable pid list built from chained pidlist_entry_t chunks. */
+typedef struct {
+	SLIST_HEAD(, pidlist_entry) pl_head;
+	struct pidlist_entry *pl_active; /* chunk currently being filled */
+	u_int pl_nalloc;                 /* total pid slots allocated */
+} pidlist_t;
+
+/* Reset pl to an empty list and return it for call chaining. */
+static __inline__ pidlist_t *
+pidlist_init(pidlist_t *pl)
+{
+	SLIST_INIT(&pl->pl_head);
+	pl->pl_nalloc = 0;
+	pl->pl_active = NULL;
+	return pl;
+}
+
+/*
+ * Grow pl until it can hold at least `needed` pids, adding one chunk at
+ * a time. Blocking allocation; panics on exhaustion. Returns the total
+ * capacity after growth.
+ */
+static u_int
+pidlist_alloc(pidlist_t *pl, u_int needed)
+{
+	while (pl->pl_nalloc < needed) {
+		pidlist_entry_t *pe = kalloc(sizeof(*pe));
+		if (NULL == pe) {
+			panic("no space for pidlist entry");
+		}
+		pe->pe_nused = 0;
+		SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link);
+		pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0]));
+	}
+	return pl->pl_nalloc;
+}
+
+/* Free every chunk of pl and reset its capacity to zero. */
+static void
+pidlist_free(pidlist_t *pl)
+{
+	pidlist_entry_t *pe;
+
+	while ((pe = SLIST_FIRST(&pl->pl_head)) != NULL) {
+		SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link);
+		kfree(pe, sizeof(*pe));
+	}
+	pl->pl_nalloc = 0;
+}
+
+/* Begin filling from the head chunk; the list must be non-empty. */
+static __inline__ void
+pidlist_set_active(pidlist_t *pl)
+{
+	pl->pl_active = SLIST_FIRST(&pl->pl_head);
+	assert(pl->pl_active != NULL);
+}
+
+/*
+ * Append pid to pl, advancing to the next pre-allocated chunk when the
+ * active one is full. Panics if capacity was not pre-allocated by
+ * pidlist_alloc().
+ */
+static void
+pidlist_add_pid(pidlist_t *pl, pid_t pid)
+{
+	pidlist_entry_t *pe = pl->pl_active;
+	if (pe->pe_nused >= sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])) {
+		if (NULL == (pe = SLIST_NEXT(pe, pe_link))) {
+			panic("pidlist allocation exhausted");
+		}
+		pl->pl_active = pe;
+	}
+	pe->pe_pid[pe->pe_nused++] = pid;
+}
+
+/* Total number of pid slots currently allocated in pl. */
+static __inline__ u_int
+pidlist_nalloc(const pidlist_t *pl)
+{
+	return pl->pl_nalloc;
+}
+
+/*
+ * A process group has become orphaned; if there are any stopped processes in
+ * the group, hang-up all process in that group.
+ */
+/*
+ * A process group has become orphaned; if any member is stopped, send
+ * SIGHUP then SIGCONT to every member. Member pids are snapshotted into
+ * a pidlist (allocated while the pgrp lock is dropped, retried until
+ * capacity suffices) so the signals can be delivered without holding
+ * the pgrp lock.
+ */
+static void
+orphanpg(struct pgrp *pgrp)
+{
+	pidlist_t pid_list, *pl = pidlist_init(&pid_list);
+	u_int pid_count_available = 0;
+	proc_t p;
+
+	/* allocate outside of the pgrp_lock */
+	for (;;) {
+		pgrp_lock(pgrp);
+
+		boolean_t should_iterate = FALSE;
+		pid_count_available = 0;
+
+		PGMEMBERS_FOREACH(pgrp, p) {
+			pid_count_available++;
+			if (p->p_stat == SSTOP) {
+				should_iterate = TRUE;
+			}
+		}
+		if (pid_count_available == 0 || !should_iterate) {
+			pgrp_unlock(pgrp);
+			goto out; /* no orphaned processes OR nothing stopped */
+		}
+		if (pidlist_nalloc(pl) >= pid_count_available) {
+			break;
+		}
+		pgrp_unlock(pgrp);
+
+		/* Membership may change while unlocked; loop re-counts. */
+		pidlist_alloc(pl, pid_count_available);
+	}
+	pidlist_set_active(pl);
+
+	u_int pid_count = 0;
+	PGMEMBERS_FOREACH(pgrp, p) {
+		pidlist_add_pid(pl, proc_pid(p));
+		if (++pid_count >= pid_count_available) {
+			break;
+		}
+	}
+	pgrp_unlock(pgrp);
+
+	/* Signal each snapshotted member that still exists. */
+	const pidlist_entry_t *pe;
+	SLIST_FOREACH(pe, &(pl->pl_head), pe_link) {
+		for (u_int i = 0; i < pe->pe_nused; i++) {
+			const pid_t pid = pe->pe_pid[i];
+			if (0 == pid) {
+				continue; /* skip kernproc */
+			}
+			p = proc_find(pid);
+			if (!p) {
+				continue;
+			}
+			proc_transwait(p, 0);
+			pt_setrunnable(p);
+			psignal(p, SIGHUP);
+			psignal(p, SIGCONT);
+			proc_rele(p);
+		}
+	}
+out:
+	pidlist_free(pl);
+}
+
+/* Classic (Blue Box) processes no longer exist; always 0. */
+int
+proc_is_classic(proc_t p __unused)
+{
+	return 0;
+}
+
+/* XXX Why does this function exist? Need to kill it off... */
+proc_t
+current_proc_EXTERNAL(void)
+{
+	proc_t cur = current_proc();
+
+	return cur;
+}
+
+/* 1 when p's I/O policy forces HFS case sensitivity, else 0. */
+int
+proc_is_forcing_hfs_case_sensitivity(proc_t p)
+{
+	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) != 0;
+}
+
+#if CONFIG_COREDUMP
+/*
+ * proc_core_name(name, uid, pid)
+ * Expand the name described in corefilename, using name, uid, and pid.
+ * corefilename is a printf-like string, with three format specifiers:
+ * %N name of process ("name")
+ * %P process id (pid)
+ * %U user id (uid)
+ * For example, "%N.core" is the default; they can be disabled completely
+ * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
+ * This is controlled by the sysctl variable kern.corefile (see above).
+ */
+__private_extern__ int
+proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
+ size_t cf_name_len)
+{
+ const char *format, *appendstr;
+ char id_buf[11]; /* Buffer for pid/uid -- max 4B */
+ size_t i, l, n;
+
+ if (cf_name == NULL) {
+ goto toolong;
+ }
+
+ format = corefilename;
+ for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {