+
+ /*
+ * Remove proc from allproc queue and from pidhash chain.
+ * Need to do this before we do anything that can block.
+ * Not doing so causes things like mount() to find this on allproc
+ * in a partially cleaned state.
+ */
+
+ proc_list_lock();
+
+ LIST_REMOVE(p, p_list);
+ LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
+ /* will not be visible via proc_find */
+ p->p_listflag |= P_LIST_EXITED;
+
+ proc_list_unlock();
+
+ proc_lock(p);
+ p->p_xstat = rv;
+ p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
+ p->p_sigignore = ~0;
+ proc_unlock(p);
+
+ proc_spinlock(p);
+ if (thread_call_cancel(p->p_rcall))
+ p->p_ractive--;
+
+ while (p->p_ractive > 0) {
+ proc_spinunlock(p);
+
+ delay(1);
+
+ proc_spinlock(p);
+ }
+ proc_spinunlock(p);
+
+ thread_call_free(p->p_rcall);
+ p->p_rcall = NULL;
+
+ ut->uu_siglist = 0;
+
+ vproc_exit(p);
+}
+
+/*
+ * vproc_exit
+ *
+ * Continue process teardown after the caller has removed the proc from
+ * allproc/pidhash and marked it P_LIST_EXITED: release the controlling
+ * terminal and session leadership, reparent or reap children, free
+ * per-process substructures (file table, stats, sigacts, limits), and
+ * finally either signal/wake the parent or, for a dead-parent child,
+ * reap directly into init.
+ *
+ * Expects: p->p_xstat already holds the exit status; all outstanding
+ * proc references are drained below before teardown proceeds.
+ */
+void
+vproc_exit(proc_t p)
+{
+ proc_t q;
+ proc_t pp;
+
+ vnode_t tvp;
+#ifdef FIXME
+ struct task *task = p->task;
+#endif
+ struct pgrp * pg;
+ struct session *sessp;
+ boolean_t fstate;
+
+ /* XXX Zombie allocation may fail, in which case stats get lost */
+ MALLOC_ZONE(p->p_ru, struct rusage *,
+ sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
+
+
+ /* Block until all outstanding references on the proc are dropped */
+ proc_refdrain(p);
+
+ /*
+ * Close open files and release open-file table.
+ * This may block!
+ */
+ fdfree(p);
+
+ sessp = proc_session(p);
+ if (SESS_LEADER(p, sessp)) {
+
+ /* Protected by funnel for tty accesses */
+ fstate = thread_funnel_set(kernel_flock, TRUE);
+
+ if (sessp->s_ttyvp != NULLVP) {
+ struct vnode *ttyvp;
+ int ttyvid;
+ struct vfs_context context;
+ struct tty * tp;
+
+ /*
+ * Controlling process.
+ * Signal foreground pgrp,
+ * drain controlling terminal
+ * and revoke access to controlling terminal.
+ */
+ tp = sessp->s_ttyp;
+
+ if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
+ tty_pgsignal(tp, SIGHUP, 1);
+ (void) ttywait(tp);
+ /*
+ * The tty could have been revoked
+ * if we blocked.
+ */
+
+ /* Detach the tty from the session under the session lock */
+ session_lock(sessp);
+ ttyvp = sessp->s_ttyvp;
+ ttyvid = sessp->s_ttyvid;
+ sessp->s_ttyvp = NULL;
+ sessp->s_ttyvid = 0;
+ sessp->s_ttyp = NULL;
+ sessp->s_ttypgrpid = NO_PID;
+ session_unlock(sessp);
+
+ /* Revoke only if the vnode/vid pair is still the one we saw */
+ if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
+ context.vc_thread = proc_thread(p); /* XXX */
+ context.vc_ucred = kauth_cred_proc_ref(p);
+ VNOP_REVOKE(ttyvp, REVOKEALL, &context);
+ vnode_put(ttyvp);
+ kauth_cred_unref(&context.vc_ucred);
+ }
+ } else {
+ /* Not our tty's session anymore; just clear the fields */
+ session_lock(sessp);
+ ttyvp = sessp->s_ttyvp;
+ sessp->s_ttyvp = NULL;
+ sessp->s_ttyvid = 0;
+ sessp->s_ttyp = NULL;
+ sessp->s_ttypgrpid = NO_PID;
+ session_unlock(sessp);
+ }
+ if (ttyvp)
+ vnode_rele(ttyvp);
+ /*
+ * s_ttyp is not zero'd; we use this to indicate
+ * that the session once had a controlling terminal.
+ * (for logging and informational purposes)
+ */
+ }
+ (void) thread_funnel_set(kernel_flock, fstate);
+
+ session_lock(sessp);
+ sessp->s_leader = NULL;
+ session_unlock(sessp);
+ }
+ session_rele(sessp);
+
+ pg = proc_pgrp(p);
+ fixjobc(p, pg, 0);
+ pg_rele(pg);
+
+ /* Lift the file-size limit for any final teardown writes
+  * (presumably so accounting/core output is not truncated — TODO confirm) */
+ p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+
+ /* Dispose of children: reap zombies here, reparent the rest to init */
+ proc_list_lock();
+ proc_childdrainstart(p);
+ while ((q = p->p_children.lh_first) != NULL) {
+ q->p_listflag |= P_LIST_DEADPARENT;
+ if (q->p_stat == SZOMB) {
+ if (p != q->p_pptr)
+ panic("parent child linkage broken");
+ /* check for lookups by zomb sysctl */
+ while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
+ msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
+ }
+ q->p_listflag |= P_LIST_WAITING;
+ /*
+ * This is a named reference and it is not granted
+ * if the reap is already in progress. So we get
+ * the reference here exclusively and there can be
+ * no waiters. So there is no need for a wakeup
+ * after we are done. Also the reap frees the structure
+ * and the proc struct cannot be used for wakeups as well.
+ * It is safe to use q here as this is system reap
+ */
+ (void)reap_child_locked(p, q, 1, 1, 0);
+ } else {
+ proc_reparentlocked(q, initproc, 0, 1);
+ /*
+ * Traced processes are killed
+ * since their existence means someone is messing up.
+ */
+ if (q->p_lflag & P_LTRACED) {
+ proc_list_unlock();
+ proc_lock(q);
+ q->p_lflag &= ~P_LTRACED;
+ if (q->sigwait_thread) {
+ proc_unlock(q);
+ /*
+ * The sigwait_thread could be stopped at a
+ * breakpoint. Wake it up to kill.
+ * Need to do this as it could be a thread which is not
+ * the first thread in the task. So any attempts to kill
+ * the process would result into a deadlock on q->sigwait.
+ */
+ thread_resume((thread_t)q->sigwait_thread);
+ clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
+ threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
+ } else
+ proc_unlock(q);
+
+ psignal(q, SIGKILL);
+ proc_list_lock();
+ }
+ }
+ }
+
+ proc_childdrainend(p);
+ proc_list_unlock();
+
+ /*
+ * Release reference to text vnode
+ */
+ tvp = p->p_textvp;
+ p->p_textvp = NULL;
+ if (tvp != NULLVP) {
+ vnode_rele(tvp);
+ }
+
+ /*
+ * Save exit status and final rusage info, adding in child rusage
+ * info and self times. If we were unable to allocate a zombie
+ * structure, this information is lost.
+ */
+ /* No need for locking here as no one other than this thread can access this */
+ if (p->p_ru != NULL) {
+ *p->p_ru = p->p_stats->p_ru;
+ timerclear(&p->p_ru->ru_utime);
+ timerclear(&p->p_ru->ru_stime);
+
+#ifdef FIXME
+ if (task) {
+ task_basic_info_data_t tinfo;
+ task_thread_times_info_data_t ttimesinfo;
+ int task_info_stuff, task_ttimes_stuff;
+ struct timeval ut,st;
+
+ task_info_stuff = TASK_BASIC_INFO_COUNT;
+ task_info(task, TASK_BASIC_INFO,
+ &tinfo, &task_info_stuff);
+ p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
+ p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
+ p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
+ p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
+
+ task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
+ task_info(task, TASK_THREAD_TIMES_INFO,
+ &ttimesinfo, &task_ttimes_stuff);
+
+ ut.tv_sec = ttimesinfo.user_time.seconds;
+ ut.tv_usec = ttimesinfo.user_time.microseconds;
+ st.tv_sec = ttimesinfo.system_time.seconds;
+ st.tv_usec = ttimesinfo.system_time.microseconds;
+ timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
+ timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
+ }
+#endif /* FIXME */
+
+ ruadd(p->p_ru, &p->p_stats->p_cru);
+ }
+
+ /*
+ * Free up profiling buffers.
+ */
+ {
+ struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
+
+ p1 = p0->pr_next;
+ p0->pr_next = NULL;
+ p0->pr_scale = 0;
+
+ for (; p1 != NULL; p1 = pn) {
+ pn = p1->pr_next;
+ kfree(p1, sizeof *p1);
+ }
+ }
+
+ /*
+ * Other substructures are freed from wait().
+ */
+ FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
+ p->p_stats = NULL;
+
+ FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
+ p->p_sigacts = NULL;
+
+ proc_limitdrop(p, 1);
+ p->p_limit = NULL;
+
+ /*
+ * Finish up by terminating the task
+ * and halt this thread (only if a
+ * member of the task exiting).
+ */
+ p->task = TASK_NULL;
+
+ /*
+ * Notify parent that we're gone.
+ */
+ pp = proc_parent(p);
+ if ((p->p_listflag & P_LIST_DEADPARENT) == 0) {
+ if (pp != initproc) {
+ proc_lock(pp);
+ pp->si_pid = p->p_pid;
+ pp->si_status = p->p_xstat;
+ pp->si_code = CLD_EXITED;
+ /*
+ * p_ucred usage is safe as it is an exiting process
+ * and reference is dropped in reap
+ */
+ pp->si_uid = p->p_ucred->cr_ruid;
+ proc_unlock(pp);
+ }
+ /* mark as a zombie */
+ /* No need to take proc lock as all refs are drained and
+ * no one except parent (reaping) can look at this.
+ * The write is to an int and is coherent. Also parent is
+ * keyed off of list lock for reaping
+ */
+ p->p_stat = SZOMB;
+
+ psignal(pp, SIGCHLD);
+
+ /* and now wakeup the parent */
+ proc_list_lock();
+ wakeup((caddr_t)pp);
+ proc_list_unlock();
+ } else {
+ /* Parent is already dead: mark zombie and reap ourselves into init */
+ proc_list_lock();
+ p->p_stat = SZOMB;
+ /* check for lookups by zomb sysctl */
+ while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
+ msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
+ }
+ p->p_listflag |= P_LIST_WAITING;
+ /*
+ * This is a named reference and it is not granted
+ * if the reap is already in progress. So we get
+ * the reference here exclusively and there can be
+ * no waiters. So there is no need for a wakeup
+ * after we are done. Also the reap frees the structure
+ * and the proc struct cannot be used for wakeups as well.
+ * It is safe to use p here as this is system reap
+ */
+ (void)reap_child_locked(pp, p, 0, 1, 1);
+ /* list lock dropped by reap_child_locked */
+ }
+ proc_rele(pp);
+}
+
+
+/*
+ * munge_rusage
+ * LP64 support - long is 64 bits if we are dealing with a 64 bit user
+ * process. We munge the kernel (32 bit) version of rusage into the
+ * 64 bit version.
+ */
+__private_extern__ void
+munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p)
+{
+ /* timeval changes size, so utime and stime need special handling */
+ a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
+ a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
+ a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
+ a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
+ /*
+ * everything else can be a direct assign, since there is no loss
+ * of precision implied going from 32 to 64 bits.
+ */
+ a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
+ a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
+ a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
+ a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
+ a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
+ a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
+ a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
+ a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
+ a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
+ a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
+ a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
+ a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
+ a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
+ a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;