#include <mach/task.h>
#include <mach/thread_act.h>
+#include <vm/vm_protos.h>
+
#include <sys/sdt.h>
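+/* Latched just before the pid-1 panic in exit1_internal when initproc exits. */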
+extern boolean_t init_task_died;
extern char init_task_failure_data[];
void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
void vfork_exit(proc_t p, int rv);
/*
* Things which should have prototypes in headers, but don't
*/
-void *get_bsduthreadarg(thread_t);
void proc_exit(proc_t p);
int wait1continue(int result);
int waitidcontinue(int result);
-int *get_bsduthreadrval(thread_t);
kern_return_t sys_perf_notify(thread_t thread, int pid);
kern_return_t task_exception_notify(exception_type_t exception,
mach_exception_data_type_t code, mach_exception_data_type_t subcode);
void delay(int);
-
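+/* Snapshots resource-usage statistics for p into *ru; flavor selects the rusage_info version (RUSAGE_INFO_CURRENT below). */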
+void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor);
+
/*
* NOTE: Source and target may *NOT* overlap!
* XXX Should share code with bsd/dev/ppc/unix_signal.c
{
exit1(p, W_EXITCODE(uap->rval, 0), retval);
- /* drop funnel before we return */
thread_exception_return();
/* NOTREACHED */
while (TRUE)
int
exit1(proc_t p, int rv, int *retval)
{
- return exit1_internal(p, rv, retval, TRUE, TRUE);
+ return exit1_internal(p, rv, retval, TRUE, TRUE, 0);
}
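+/*
+ * jetsam_flags (e.g. P_JETSAM_VNODE) lets external-termination callers such
+ * as memorystatus tag the exit; the flags are folded into p_lflag below.
+ */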
int
-exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify)
+exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
+ int jetsam_flags)
{
thread_t self = current_thread();
struct task *task = p->task;
DTRACE_PROC1(exit, int, CLD_EXITED);
/* mark process is going to exit and pull out of DBG/disk throttle */
- proc_removethrottle(p);
-
-#if CONFIG_MEMORYSTATUS
- memorystatus_list_remove(p->p_pid);
-#endif
+ /* TODO: This should be done after becoming exit thread */
+ proc_set_task_policy(p->task, THREAD_NULL, TASK_POLICY_ATTRIBUTE,
+ TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
proc_lock(p);
- error = proc_transstart(p, 1);
+ error = proc_transstart(p, 1, ((jetsam_flags & P_JETSAM_VNODE) ? 1 : 0));
if (error == EDEADLK) {
/* Temp: If deadlock error, then it implies multithreaded exec is
	 * in progress. Instead of letting exit continue and
*/
proc_unlock(p);
if (current_proc() == p){
+ if (p->exit_thread == self)
+ printf("exit_thread failed to exit, leaving process %s[%d] in unkillable limbo\n",
+ p->p_comm, p->p_pid);
thread_exception_return();
} else {
/* external termination like jetsam */
}
sig_lock_to_exit(p);
}
- if (p == initproc) {
+ if (p == initproc && current_proc() == p) {
proc_unlock(p);
printf("pid 1 exited (signal %d, exit %d)",
WTERMSIG(rv), WEXITSTATUS(rv));
+#if (DEVELOPMENT || DEBUG)
+ int err;
+ /*
+ * For debugging purposes, generate a core file of initproc before
+ * panicking. Leave at least 300 MB free on the root volume, and ignore
+ * the process's corefile ulimit.
+ */
+ if ((err = coredump(p, 300, 1)) != 0) {
+ printf("Failed to generate initproc core file: error %d", err);
+ } else {
+ printf("Generated initproc core file");
+ sync(p, (void *)NULL, (int *)NULL);
+ }
+#endif
+ init_task_died = TRUE;
panic("%s died\nState at Last Exception:\n\n%s",
(p->p_comm[0] != '\0' ?
p->p_comm :
p->p_lflag |= P_LEXIT;
p->p_xstat = rv;
+ p->p_lflag |= jetsam_flags;
proc_transend(p, 1);
proc_unlock(p);
struct uthread *ut;
thread_t self = current_thread();
ut = get_bsdthread_info(self);
+ struct rusage_superset *rup;
/* If a core should be generated, notify crash reporter */
if (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0)) {
(void)sys_perf_notify(self, p->p_pid);
}
+ /*
+ * Before this process becomes a zombie, stash resource usage
+ * stats in the proc for external observers to query
+ * via proc_pid_rusage().
+ *
+ * If the zombie allocation fails, just punt the stats.
+ */
+ MALLOC_ZONE(rup, struct rusage_superset *,
+ sizeof (*rup), M_ZOMBIE, M_WAITOK);
+ if (rup != NULL) {
+ gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
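+		/* Assumption: a live phys_footprint is not meaningful for an exiting process, so report 0 here. */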
+ rup->ri.ri_phys_footprint = 0;
+ rup->ri.ri_proc_exit_abstime = mach_absolute_time();
+
+ /*
+ * Make the rusage_info visible to external observers
+ * only after it has been completely filled in.
+ */
+ p->p_ru = rup;
+ }
+
/*
* Remove proc from allproc queue and from pidhash chain.
* Need to do this before we do anything that can block.
proc_list_lock();
+#if CONFIG_MEMORYSTATUS
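+	/* Drop the process from the memorystatus (jetsam) lists while proc_list_lock is still held. */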
+ memorystatus_remove(p, TRUE);
+#endif
+
LIST_REMOVE(p, p_list);
LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
/* will not be visible via proc_find */
int exitval;
int knote_hint;
- uth = (struct uthread *)get_bsdthread_info(current_thread());
+ uth = current_uthread();
proc_lock(p);
- proc_transstart(p, 1);
+ proc_transstart(p, 1, 0);
if( !(p->p_lflag & P_LEXIT)) {
/*
* This can happen if a thread_terminate() occurs
proc_unlock(p);
#endif
- /* XXX Zombie allocation may fail, in which case stats get lost */
- MALLOC_ZONE(p->p_ru, struct rusage *,
- sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
-
nspace_proc_exit(p);
#if VM_PRESSURE_EVENTS
proc_refdrain(p);
/* if any pending cpu limits action, clear it */
- task_clear_cpuusage(p->task);
+ task_clear_cpuusage(p->task, TRUE);
workqueue_mark_exiting(p);
workqueue_exit(p);
 * no need to throttle this thread since it's going away
 * but we do need to update our bookkeeping w.r.t. throttled threads
*/
- throttle_lowpri_io(FALSE);
- }
-
-#if !CONFIG_EMBEDDED
- if (p->p_legacy_behavior & PROC_LEGACY_BEHAVIOR_IOTHROTTLE) {
- throttle_legacy_process_decr();
+ throttle_lowpri_io(0);
}
-#endif
#if SYSV_SHM
	/* Release the reference on SYSV shared memory */
if (sessp->s_ttyvp != NULLVP) {
struct vnode *ttyvp;
int ttyvid;
+ int cttyflag = 0;
struct vfs_context context;
struct tty *tp;
if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
session_unlock(sessp);
+ /*
+ * We're going to SIGHUP the foreground process
+ * group. It can't change from this point on
+ * until the revoke is complete.
+ * The process group changes under both the tty
+			 * lock and proc_list_lock, but holding just one is
+			 * enough to keep it stable.
+ */
+ tty_lock(tp);
+ ttysetpgrphup(tp);
+ tty_unlock(tp);
+
tty_pgsignal(tp, SIGHUP, 1);
session_lock(sessp);
tp = SESSION_TP(sessp);
}
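+			/* Remember whether cttyopen took an extra usecount (S_CTTYREF); it is released after the revoke below. */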
+ cttyflag = sessp->s_flags & S_CTTYREF;
+ sessp->s_flags &= ~S_CTTYREF;
ttyvp = sessp->s_ttyvp;
ttyvid = sessp->s_ttyvid;
sessp->s_ttyvp = NULLVP;
}
context.vc_thread = proc_thread(p); /* XXX */
context.vc_ucred = kauth_cred_proc_ref(p);
- vnode_rele(ttyvp);
VNOP_REVOKE(ttyvp, REVOKEALL, &context);
+ if (cttyflag) {
+ /*
+ * Release the extra usecount taken in cttyopen.
+ * usecount should be released after VNOP_REVOKE is called.
+ * This usecount was taken to ensure that
+ * the VNOP_REVOKE results in a close to
+ * the tty since cttyclose is a no-op.
+ */
+ vnode_rele(ttyvp);
+ }
vnode_put(ttyvp);
kauth_cred_unref(&context.vc_ucred);
ttyvp = NULLVP;
}
- if (ttyvp)
- vnode_rele(ttyvp);
- if (tp)
+ if (tp) {
+ /*
+			 * Clear the flag whether or not it was set; spec_close
+			 * clears it as well, so it can never be left dangling.
+ */
+ tty_lock(tp);
+ ttyclrpgrphup(tp);
+ tty_unlock(tp);
+
ttyfree(tp);
+ }
}
session_lock(sessp);
sessp->s_leader = NULL;
proc_childdrainstart(p);
while ((q = p->p_children.lh_first) != NULL) {
int reparentedtoinit = (q->p_listflag & P_LIST_DEADPARENT) ? 1 : 0;
- q->p_listflag |= P_LIST_DEADPARENT;
if (q->p_stat == SZOMB) {
if (p != q->p_pptr)
panic("parent child linkage broken");
*/
(void)reap_child_locked(p, q, 1, reparentedtoinit, 1, 0);
} else {
- proc_reparentlocked(q, initproc, 0, 1);
/*
* Traced processes are killed
* since their existence means someone is messing up.
*/
if (q->p_lflag & P_LTRACED) {
+ struct proc *opp;
+
/*
* Take a reference on the child process to
* ensure it doesn't exit and disappear between
continue;
proc_list_unlock();
+
+ opp = proc_find(q->p_oppid);
+ if (opp != PROC_NULL) {
+ proc_list_lock();
+ q->p_oppid = 0;
+ proc_list_unlock();
+ proc_reparentlocked(q, opp, 0, 0);
+ proc_rele(opp);
+ } else {
+ /* original parent exited while traced */
+ proc_list_lock();
+ q->p_listflag |= P_LIST_DEADPARENT;
+ q->p_oppid = 0;
+ proc_list_unlock();
+ proc_reparentlocked(q, initproc, 0, 0);
+ }
+
proc_lock(q);
q->p_lflag &= ~P_LTRACED;
+
if (q->sigwait_thread) {
thread_t thread = q->sigwait_thread;
psignal(q, SIGKILL);
proc_list_lock();
proc_rele_locked(q);
+ } else {
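+				/* Not traced: mark the child orphaned and reparent it to initproc. */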
+ q->p_listflag |= P_LIST_DEADPARENT;
+ proc_reparentlocked(q, initproc, 0, 1);
}
}
}
* info and self times. If we were unable to allocate a zombie
* structure, this information is lost.
*/
- /* No need for locking here as no one than this thread can access this */
if (p->p_ru != NULL) {
calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
- *p->p_ru = p->p_stats->p_ru;
+ p->p_ru->ru = p->p_stats->p_ru;
- ruadd(p->p_ru, &p->p_stats->p_cru);
+ ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
}
/*
proc_limitdrop(p, 1);
p->p_limit = NULL;
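+	/* Sever the task's ownership of purgeable memory before the task is terminated. */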
+ vm_purgeable_disown(p->task);
/*
* Finish up by terminating the task
pp = proc_parent(p);
if (pp->p_flag & P_NOCLDWAIT) {
+ if (p->p_ru != NULL) {
+ proc_lock(pp);
#if 3839178
/*
* If the parent is ignoring SIGCHLD, then POSIX requires
* zombie to init. If we were unable to allocate a
* zombie structure, this information is lost.
*/
- if (p->p_ru != NULL) {
- proc_lock(pp);
- ruadd(&pp->p_stats->p_cru, p->p_ru);
+ ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
+#endif /* 3839178 */
+ update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
proc_unlock(pp);
}
-#endif /* !3839178 */
-
+
/* kernel can reap this one, no need to move it to launchd */
proc_list_lock();
p->p_listflag |= P_LIST_DEADPARENT;
 * no need to throttle this thread since it's going away
 * but we do need to update our bookkeeping w.r.t. throttled threads
*/
- throttle_lowpri_io(FALSE);
+ throttle_lowpri_io(0);
}
proc_rele(pp);
}
}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
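+	/* NOTE_REAP is marked deprecated, but the event is still posted for existing watchers. */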
proc_knote(child, NOTE_REAP);
+#pragma clang diagnostic pop
+
proc_knote_drain(child);
child->p_xstat = 0;
*/
if (!(parent->p_flag & P_NOCLDWAIT))
#endif /* 3839178 */
- ruadd(&parent->p_stats->p_cru, child->p_ru);
+ ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
+ update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
proc_unlock(parent);
FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
child->p_ru = NULL;
proc_checkdeadrefs(child);
nprocs--;
+ if (deadparent) {
+ /*
+ * If a child zombie is being reaped because its parent
+ * is exiting, make sure we update the list flag
+ */
+ child->p_listflag |= P_LIST_DEADPARENT;
+ }
+
proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
int
wait1continue(int result)
{
- void *vt;
+ proc_t p;
thread_t thread;
+ uthread_t uth;
+ struct _wait4_data *wait4_data;
+ struct wait4_nocancel_args *uap;
int *retval;
- proc_t p;
if (result)
return(result);
p = current_proc();
thread = current_thread();
- vt = get_bsduthreadarg(thread);
- retval = get_bsduthreadrval(thread);
- return(wait4(p, (struct wait4_args *)vt, retval));
+ uth = (struct uthread *)get_bsdthread_info(thread);
+
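+	/* Recover the wait4 arguments stashed in the uthread before it blocked in msleep0. */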
+ wait4_data = &uth->uu_kevent.uu_wait4_data;
+ uap = wait4_data->args;
+ retval = wait4_data->retval;
+ return(wait4_nocancel(p, uap, retval));
}
int
int sibling_count;
proc_t p;
int status, error;
+ uthread_t uth;
+ struct _wait4_data *wait4_data;
AUDIT_ARG(pid, uap->pid);
} else {
if (IS_64BIT_PROCESS(q)) {
struct user64_rusage my_rusage;
- munge_user64_rusage(p->p_ru, &my_rusage);
+ munge_user64_rusage(&p->p_ru->ru, &my_rusage);
error = copyout((caddr_t)&my_rusage,
uap->rusage,
sizeof (my_rusage));
}
else {
struct user32_rusage my_rusage;
- munge_user32_rusage(p->p_ru, &my_rusage);
+ munge_user32_rusage(&p->p_ru->ru, &my_rusage);
error = copyout((caddr_t)&my_rusage,
uap->rusage,
sizeof (my_rusage));
*/
if ( sibling_count == 0 ) {
int mask = sigmask(SIGCHLD);
- uthread_t uth = (struct uthread *)get_bsdthread_info(current_thread());
+ uth = current_uthread();
if ( (uth->uu_sigmask & mask) != 0 ) {
/* we are blocking SIGCHLD signals. clear any pending SIGCHLD.
return (0);
}
+ /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
+ uth = current_uthread();
+ wait4_data = &uth->uu_kevent.uu_wait4_data;
+ wait4_data->args = uap;
+ wait4_data->retval = retval;
+
if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue)))
return (error);
int
waitidcontinue(int result)
{
- void *vt;
+ proc_t p;
thread_t thread;
+ uthread_t uth;
+ struct _waitid_data *waitid_data;
+ struct waitid_nocancel_args *uap;
int *retval;
if (result)
return (result);
+ p = current_proc();
thread = current_thread();
- vt = get_bsduthreadarg(thread);
- retval = get_bsduthreadrval(thread);
- return (waitid(current_proc(), (struct waitid_args *)vt, retval));
+ uth = (struct uthread *)get_bsdthread_info(thread);
+
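+	/* Recover the waitid arguments stashed in the uthread before it blocked in msleep0. */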
+ waitid_data = &uth->uu_kevent.uu_waitid_data;
+ uap = waitid_data->args;
+ retval = waitid_data->retval;
+ return(waitid_nocancel(p, uap, retval));
}
/*
int nfound;
proc_t p;
int error;
+ uthread_t uth;
+ struct _waitid_data *waitid_data;
if (uap->options == 0 ||
(uap->options & ~(WNOHANG|WNOWAIT|WCONTINUED|WSTOPPED|WEXITED)))
return (0);
}
+ /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
+ uth = current_uthread();
+ waitid_data = &uth->uu_kevent.uu_waitid_data;
+ waitid_data->args = uap;
+ waitid_data->retval = retval;
+
if ((error = msleep0(q, proc_list_mlock,
PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0)
return (error);
proc_list_lock();
+#if CONFIG_MEMORYSTATUS
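+	/* As in exit1: drop the process from the memorystatus lists under proc_list_lock. */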
+ memorystatus_remove(p, TRUE);
+#endif
+
LIST_REMOVE(p, p_list);
LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
/* will not be visible via proc_find */
#endif
struct pgrp * pg;
struct session *sessp;
+ struct rusage_superset *rup;
/* XXX Zombie allocation may fail, in which case stats get lost */
- MALLOC_ZONE(p->p_ru, struct rusage *,
- sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
-
+ MALLOC_ZONE(rup, struct rusage_superset *,
+ sizeof (*rup), M_ZOMBIE, M_WAITOK);
proc_refdrain(p);
*/
fdfree(p);
-#if !CONFIG_EMBEDDED
- if (p->p_legacy_behavior & PROC_LEGACY_BEHAVIOR_IOTHROTTLE) {
- throttle_legacy_process_decr();
- }
-#endif
-
sessp = proc_session(p);
if (SESS_LEADER(p, sessp)) {
if (sessp->s_ttyvp != NULLVP) {
struct vnode *ttyvp;
int ttyvid;
+ int cttyflag = 0;
struct vfs_context context;
struct tty *tp;
if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
session_unlock(sessp);
+ /*
+ * We're going to SIGHUP the foreground process
+ * group. It can't change from this point on
+ * until the revoke is complete.
+ * The process group changes under both the tty
+			 * lock and proc_list_lock, but holding just one is
+			 * enough to keep it stable.
+ */
+ tty_lock(tp);
+ ttysetpgrphup(tp);
+ tty_unlock(tp);
+
tty_pgsignal(tp, SIGHUP, 1);
session_lock(sessp);
tp = SESSION_TP(sessp);
}
+ cttyflag = sessp->s_flags & S_CTTYREF;
+ sessp->s_flags &= ~S_CTTYREF;
ttyvp = sessp->s_ttyvp;
ttyvid = sessp->s_ttyvid;
			sessp->s_ttyvp = NULLVP;
}
context.vc_thread = proc_thread(p); /* XXX */
context.vc_ucred = kauth_cred_proc_ref(p);
- vnode_rele(ttyvp);
VNOP_REVOKE(ttyvp, REVOKEALL, &context);
+ if (cttyflag) {
+ /*
+ * Release the extra usecount taken in cttyopen.
+ * usecount should be released after VNOP_REVOKE is called.
+ * This usecount was taken to ensure that
+ * the VNOP_REVOKE results in a close to
+ * the tty since cttyclose is a no-op.
+ */
+ vnode_rele(ttyvp);
+ }
vnode_put(ttyvp);
kauth_cred_unref(&context.vc_ucred);
ttyvp = NULLVP;
}
- if (ttyvp)
- vnode_rele(ttyvp);
- if (tp)
+ if (tp) {
+ /*
+			 * Clear the flag whether or not it was set; spec_close
+			 * clears it as well, so it can never be left dangling.
+ */
+ tty_lock(tp);
+ ttyclrpgrphup(tp);
+ tty_unlock(tp);
+
ttyfree(tp);
+ }
}
session_lock(sessp);
sessp->s_leader = NULL;
proc_list_lock();
proc_childdrainstart(p);
while ((q = p->p_children.lh_first) != NULL) {
- q->p_listflag |= P_LIST_DEADPARENT;
if (q->p_stat == SZOMB) {
if (p != q->p_pptr)
panic("parent child linkage broken");
*/
(void)reap_child_locked(p, q, 1, 0, 1, 0);
} else {
- proc_reparentlocked(q, initproc, 0, 1);
/*
* Traced processes are killed
* since their existence means someone is messing up.
*/
if (q->p_lflag & P_LTRACED) {
+ struct proc *opp;
+
proc_list_unlock();
+
+ opp = proc_find(q->p_oppid);
+ if (opp != PROC_NULL) {
+ proc_list_lock();
+ q->p_oppid = 0;
+ proc_list_unlock();
+ proc_reparentlocked(q, opp, 0, 0);
+ proc_rele(opp);
+ } else {
+ /* original parent exited while traced */
+ proc_list_lock();
+ q->p_listflag |= P_LIST_DEADPARENT;
+ q->p_oppid = 0;
+ proc_list_unlock();
+ proc_reparentlocked(q, initproc, 0, 0);
+ }
+
proc_lock(q);
q->p_lflag &= ~P_LTRACED;
+
if (q->sigwait_thread) {
thread_t thread = q->sigwait_thread;
psignal(q, SIGKILL);
proc_list_lock();
+ } else {
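+				/* Not traced: mark the child orphaned and reparent it to initproc, as in exit1. */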
+ q->p_listflag |= P_LIST_DEADPARENT;
+ proc_reparentlocked(q, initproc, 0, 1);
}
}
}
* info and self times. If we were unable to allocate a zombie
* structure, this information is lost.
*/
- /* No need for locking here as no one than this thread can access this */
- if (p->p_ru != NULL) {
- *p->p_ru = p->p_stats->p_ru;
- timerclear(&p->p_ru->ru_utime);
- timerclear(&p->p_ru->ru_stime);
+ if (rup != NULL) {
+ rup->ru = p->p_stats->p_ru;
+ timerclear(&rup->ru.ru_utime);
+ timerclear(&rup->ru.ru_stime);
#ifdef FIXME
if (task) {
task_info_stuff = MACH_TASK_BASIC_INFO_COUNT;
task_info(task, MACH_TASK_BASIC_INFO,
&tinfo, &task_info_stuff);
- p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
- p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
- p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
- p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
+			rup->ru.ru_utime.tv_sec = tinfo.user_time.seconds;
+			rup->ru.ru_utime.tv_usec = tinfo.user_time.microseconds;
+			rup->ru.ru_stime.tv_sec = tinfo.system_time.seconds;
+			rup->ru.ru_stime.tv_usec = tinfo.system_time.microseconds;
task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
task_info(task, TASK_THREAD_TIMES_INFO,
ut.tv_usec = ttimesinfo.user_time.microseconds;
st.tv_sec = ttimesinfo.system_time.seconds;
st.tv_usec = ttimesinfo.system_time.microseconds;
- timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
- timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
+			timeradd(&ut, &rup->ru.ru_utime, &rup->ru.ru_utime);
+			timeradd(&st, &rup->ru.ru_stime, &rup->ru.ru_stime);
}
#endif /* FIXME */
- ruadd(p->p_ru, &p->p_stats->p_cru);
+ ruadd(&rup->ru, &p->p_stats->p_cru);
+
+ gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
+ rup->ri.ri_phys_footprint = 0;
+ rup->ri.ri_proc_exit_abstime = mach_absolute_time();
+
+ /*
+ * Now that we have filled in the rusage info, make it
+ * visible to an external observer via proc_pid_rusage().
+ */
+ p->p_ru = rup;
}
/*