/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/user.h>
+#include <sys/aio_kern.h>
+
+#include <bsm/audit_kernel.h>
+#include <bsm/audit_kevents.h>
#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
+#include <kern/sched_prim.h>
#include <kern/assert.h>
+#if KTRACE
+#include <sys/ktrace.h>
+#include <sys/ubc.h>
+#endif
extern char init_task_failure_data[];
-void exit1 __P((struct proc *, int));
+int exit1 __P((struct proc *, int, int *));
+void proc_prepareexit(struct proc *p);
+int vfork_exit(struct proc *p, int rv);
+void vproc_exit(struct proc *p);
/*
* exit --
struct exit_args *uap;
int *retval;
{
- exit1(p, W_EXITCODE(uap->rval, 0));
+ exit1(p, W_EXITCODE(uap->rval, 0), retval);
- /* drop funnel befewo we return */
+ /* drop funnel before we return */
thread_funnel_set(kernel_flock, FALSE);
thread_exception_return();
/* NOTREACHED */
while (TRUE)
- thread_block(0);
+ thread_block(THREAD_CONTINUE_NULL);
/* NOTREACHED */
}
* to zombie, and unlink proc from allproc and parent's lists. Save exit
* status and rusage for wait(). Check for child processes and orphan them.
*/
-void
-exit1(p, rv)
+int
+exit1(p, rv, retval)
register struct proc *p;
int rv;
+ int * retval;
{
register struct proc *q, *nq;
- thread_t self = current_thread();
- thread_act_t th_act_self = current_act();
+ thread_act_t self = current_act();
struct task *task = p->task;
register int i,s;
struct uthread *ut;
* called exit(), then halt any others
* right here.
*/
+
+ ut = get_bsdthread_info(self);
+ if (ut->uu_flag & P_VFORK) {
+ if (!vfork_exit(p, rv)) {
+ vfork_return(self, p->p_pptr, p , retval);
+ unix_syscall_return(0);
+ /* NOT REACHED */
+ }
+ return(EINVAL);
+ }
+	AUDIT_SYSCALL_EXIT(0, p, ut);	/* Exit is always successful */
signal_lock(p);
while (p->exit_thread != self) {
if (sig_try_locked(p) <= 0) {
- if (get_threadtask(th_act_self) != task) {
+ if (get_threadtask(self) != task) {
signal_unlock(p);
- return;
+ return(0);
}
signal_unlock(p);
- thread_terminate(th_act_self);
+ thread_terminate(self);
thread_funnel_set(kernel_flock, FALSE);
thread_exception_return();
/* NOTREACHED */
/* task terminate will call proc_terminate and that cleans it up */
task_terminate_internal(task);
- /*
- * we come back and returns to AST which
- * should cleanup the rest
- */
-#if 0
- if (task == current_task()) {
- thread_exception_return();
- /*NOTREACHED*/
- }
-
- while (task == current_task()) {
- thread_terminate_self();
- /*NOTREACHED*/
- }
-#endif
+ return(0);
}
void
{
int s;
struct uthread *ut;
- thread_t self = current_thread();
- thread_act_t th_act_self = current_act();
+ exception_data_t code[EXCEPTION_CODE_MAX];
+ thread_act_t self = current_act();
+ code[0] = 0xFF000001; /* Set terminate code */
+ code[1] = p->p_pid; /* Pass out the pid */
+ (void)sys_perf_notify(p->task, &code, 2); /* Notify the perf server */
/*
* Remove proc from allproc queue and from pidhash chain.
* in partially cleaned state.
*/
LIST_REMOVE(p, p_list);
+ LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
LIST_REMOVE(p, p_hash);
#ifdef PGINPROF
p->p_flag &= ~(P_TRACED | P_PPWAIT);
p->p_sigignore = ~0;
p->p_siglist = 0;
- ut = get_bsdthread_info(th_act_self);
- ut->uu_sig = 0;
- untimeout(realitexpire, (caddr_t)p);
-
+ ut = get_bsdthread_info(self);
+ ut->uu_siglist = 0;
+ untimeout(realitexpire, (caddr_t)p->p_pid);
}
void
proc_exit(struct proc *p)
{
- register struct proc *q, *nq;
- thread_t self = current_thread();
- thread_act_t th_act_self = current_act();
+ register struct proc *q, *nq, *pp;
struct task *task = p->task;
register int i,s;
- struct uthread *ut;
boolean_t funnel_state;
/* This can happen if thread_terminate of the single thread
MALLOC_ZONE(p->p_ru, struct rusage *,
sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
+ /*
+ * need to cancel async IO requests that can be cancelled and wait for those
+ * already active. MAY BLOCK!
+ */
+ _aio_exit( p );
+
/*
* Close open files and release open-file table.
* This may block!
/* Close ref SYSV Shared memory*/
if (p->vm_shm)
shmexit(p);
+ /* Release SYSV semaphores */
+ semexit(p);
if (SESS_LEADER(p)) {
register struct session *sp = p->p_session;
if (sp->s_ttyvp) {
+ struct vnode *ttyvp;
+
/*
* Controlling process.
* Signal foreground pgrp,
if (sp->s_ttyvp)
VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
}
- if (sp->s_ttyvp)
- vrele(sp->s_ttyvp);
+ ttyvp = sp->s_ttyvp;
sp->s_ttyvp = NULL;
+ if (ttyvp)
+ vrele(ttyvp);
/*
* s_ttyp is not zero'd; we use this to indicate
* that the session once had a controlling terminal.
fixjobc(p, p->p_pgrp, 0);
p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+ (void)acct_process(p);
+
#if KTRACE
/*
* release trace file
*/
p->p_traceflag = 0; /* don't trace the vrele() */
- if (p->p_tracep)
- vrele(p->p_tracep);
-#endif
+ if (p->p_tracep) {
+ struct vnode *tvp = p->p_tracep;
+ p->p_tracep = NULL;
+ if (UBCINFOEXISTS(tvp))
+ ubc_rele(tvp);
+ vrele(tvp);
+ }
+#endif
q = p->p_children.lh_first;
if (q) /* only need this if any child is S_ZOMB */
if (q->p_flag & P_TRACED) {
q->p_flag &= ~P_TRACED;
if (q->sigwait_thread) {
- thread_t sig_shuttle = getshuttle_thread(q->sigwait_thread);
/*
* The sigwait_thread could be stopped at a
* breakpoint. Wake it up to kill.
* the first thread in the task. So any attempts to kill
* the process would result into a deadlock on q->sigwait.
*/
- thread_resume((struct thread *)q->sigwait_thread);
- clear_wait(sig_shuttle, THREAD_INTERRUPTED);
- threadsignal(q->sigwait_thread, SIGKILL, 0);
+ thread_resume((thread_act_t)q->sigwait_thread);
+ clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
+ threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
}
psignal(q, SIGKILL);
}
}
-
/*
* Save exit status and final rusage info, adding in child rusage
* info and self times.
timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
}
-
ruadd(p->p_ru, &p->p_stats->p_cru);
/*
FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
p->p_limit = NULL;
+ /* Free the auditing info */
+ audit_proc_free(p);
+
/*
* Finish up by terminating the task
* and halt this thread (only if a
//task->proc = NULL;
set_bsdtask_info(task, NULL);
+ KNOTE(&p->p_klist, NOTE_EXIT);
+
/*
* Notify parent that we're gone.
*/
- psignal(p->p_pptr, SIGCHLD);
+ if (p->p_pptr->p_flag & P_NOCLDWAIT) {
+ struct proc * pp = p->p_pptr;
+
+ /*
+ * Add child resource usage to parent before giving
+ * zombie to init
+ */
+ ruadd(&p->p_pptr->p_stats->p_cru, p->p_ru);
+
+ proc_reparent(p, initproc);
+ /* If there are no more children wakeup parent */
+ if (LIST_EMPTY(&pp->p_children))
+ wakeup((caddr_t)pp);
+ }
+ /* should be fine as parent proc would be initproc */
+ pp = p->p_pptr;
+ if (pp != initproc) {
+ pp->si_pid = p->p_pid;
+ pp->si_status = p->p_xstat;
+ pp->si_code = CLD_EXITED;
+ pp->si_uid = p->p_cred->p_ruid;
+ }
+ psignal(pp, SIGCHLD);
+
- /* Place onto zombproc. */
- LIST_INSERT_HEAD(&zombproc, p, p_list);
+ /* mark as a zombie */
p->p_stat = SZOMB;
/* and now wakeup the parent */
struct wait4_args *uap;
int *retval;
{
-
return (wait1(p, uap, retval, 0));
}
{
struct wait4_args *a;
- a = (struct wait4_args *)get_bsduthreadarg(current_act);
+ a = (struct wait4_args *)get_bsduthreadarg(current_act());
a->rusage = uap->rusage;
a->options = uap->options;
+/*
+ * wait1continue -- continuation handed to tsleep0() by wait1().  When the
+ * sleeping parent is awakened, this re-fetches the saved syscall argument
+ * block and return-value area from the current uthread and restarts
+ * wait1() from the top.  A nonzero wakeup result (e.g. an interrupted
+ * sleep) is propagated to the syscall layer unchanged.
+ */
int
wait1continue(result)
{
-	void *vt;
-	thread_act_t thread;
-	struct uthread *ut;
-	int *retval;
-	struct proc *p;
-
-	p = get_bsdtask_info(current_task());
-	p->p_flag &= ~P_WAITING;
+	void *vt;
+	thread_act_t thread;
+	int *retval;
+	struct proc *p;

-	if (result != 0) {
-		return(result);
-	}
+	if (result)
+		return(result);

-	thread = current_act();
-	ut = get_bsdthread_info(thread);
-	vt = get_bsduthreadarg(thread);
-	retval = get_bsduthreadrval(thread);
-	wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0);
+	/* Recover the original wait4() arguments stashed on the uthread
+	 * and re-run wait1(); its return value becomes the syscall result. */
+	p = current_proc();
+	thread = current_act();
+	vt = (void *)get_bsduthreadarg(thread);
+	retval = (int *)get_bsduthreadrval(thread);
+	return(wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0));
}
int
register int nfound;
register struct proc *p, *t;
int status, error;
+ struct vnode *tvp;
-
-#if 0
- /* since we are funneled we don't need to do this atomically, yet */
- if (q->p_flag & P_WAITING) {
- return(EINVAL);
- }
- q->p_flag |= P_WAITING; /* only allow single thread to wait() */
-#endif
-
+retry:
if (uap->pid == 0)
uap->pid = -q->p_pgid;
p->p_pgid != -(uap->pid))
continue;
nfound++;
+ if (p->p_flag & P_WAITING) {
+ (void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
+ goto loop;
+ }
+ p->p_flag |= P_WAITING; /* only allow single thread to wait() */
+
if (p->p_stat == SZOMB) {
retval[0] = p->p_pid;
#if COMPAT_43
if (error = copyout((caddr_t)&status,
(caddr_t)uap->status,
sizeof(status))) {
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (error);
}
}
(error = copyout((caddr_t)p->p_ru,
(caddr_t)uap->rusage,
sizeof (struct rusage)))) {
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (error);
}
/*
if (p->p_oppid && (t = pfind(p->p_oppid))) {
p->p_oppid = 0;
proc_reparent(p, t);
+ if (t != initproc) {
+ t->si_pid = p->p_pid;
+ t->si_status = p->p_xstat;
+ t->si_code = CLD_CONTINUED;
+ t->si_uid = p->p_cred->p_ruid;
+ }
psignal(t, SIGCHLD);
wakeup((caddr_t)t);
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (0);
}
p->p_xstat = 0;
/*
* Release reference to text vnode
*/
- if (p->p_textvp)
- vrele(p->p_textvp);
+ tvp = p->p_textvp;
+ p->p_textvp = NULL;
+ if (tvp)
+ vrele(tvp);
/*
* Finally finished with old proc entry.
leavepgrp(p);
LIST_REMOVE(p, p_list); /* off zombproc */
LIST_REMOVE(p, p_sibling);
+ p->p_flag &= ~P_WAITING;
FREE_ZONE(p, sizeof *p, M_PROC);
nprocs--;
- q->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (0);
}
if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
sizeof(status));
} else
error = 0;
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (error);
}
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
}
- if (nfound == 0) {
- q->p_flag &= ~P_WAITING;
+ if (nfound == 0)
return (ECHILD);
- }
+
if (uap->options & WNOHANG) {
retval[0] = 0;
- q->p_flag &= ~P_WAITING;
return (0);
}
- if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue)) {
- q->p_flag &= ~P_WAITING;
+ if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue))
return (error);
- }
+
goto loop;
}
child->p_pptr = parent;
}
-kern_return_t
-init_process(void)
/*
 * Make the current process an "init" process, meaning
 * that it doesn't have a parent, and that it won't be
 * gunned down by kill(-1, 0).
 */
+kern_return_t
+init_process(void)
{
	register struct proc *p = current_proc();

-	if (suser(p->p_ucred, &p->p_acflag))
+	/* Bracket the Mach trap with audit enter/exit so every return
+	 * path (including the privilege-check failure) emits a record. */
+	AUDIT_MACH_SYSCALL_ENTER(AUE_INITPROCESS);
+	if (suser(p->p_ucred, &p->p_acflag)) {
+		AUDIT_MACH_SYSCALL_EXIT(KERN_NO_ACCESS);
		return(KERN_NO_ACCESS);
+	}

	if (p->p_pid != 1 && p->p_pgid != p->p_pid)
		enterpgrp(p, p->p_pid, 0);
	p->p_sibling.le_next = NULL;
	p->p_pptr = kernproc;
+	AUDIT_MACH_SYSCALL_EXIT(KERN_SUCCESS);
	return(KERN_SUCCESS);
}
struct proc *p = current_proc();
if (p != NULL) {
- exit1(p, W_EXITCODE(0, SIGKILL));
+ exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
/*NOTREACHED*/
}
}
+
+/*
+ * vfork_exit -- exit path taken (from exit1) when the exiting thread is
+ * still running on behalf of a vfork()ed child (uu_flag & P_VFORK).
+ * Performs the process-teardown bookkeeping analogous to
+ * exit1()/proc_prepareexit(): claims the exit, moves the proc to
+ * zombproc, records the exit status, then hands off to vproc_exit()
+ * for resource release.
+ *
+ * Returns 0 on success (vproc_exit() has run), or 1 if another thread
+ * had already claimed the exit (p->exit_thread was set).
+ */
+
+int
+vfork_exit(p, rv)
+	struct proc *p;
+	int rv;
+{
+	register struct proc *q, *nq;
+	thread_act_t self = current_act();
+	struct task *task = p->task;
+	register int i,s;
+	struct uthread *ut;
+	exception_data_t code[EXCEPTION_CODE_MAX];
+
+	ut = get_bsdthread_info(self);
+	/* Only one thread may run the exit path; a second caller backs off. */
+	if (p->exit_thread) {
+		return(1);
+	}
+	p->exit_thread = self;
+
+	s = splsched();
+	p->p_flag |= P_WEXIT;
+	splx(s);
+
+	code[0] = 0xFF000001;		/* Set terminate code */
+	code[1] = p->p_pid;		/* Pass out the pid */
+	(void)sys_perf_notify(p->task, &code, 2);	/* Notify the perf server */
+
+	/*
+	 * Remove proc from allproc queue and from pidhash chain.
+	 * Need to do this before we do anything that can block.
+	 * Not doing so causes things like mount() to find this proc on
+	 * allproc in a partially cleaned state.
+	 */
+	LIST_REMOVE(p, p_list);
+	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
+	LIST_REMOVE(p, p_hash);
+	/*
+	 * If parent is waiting for us to exit or exec,
+	 * P_PPWAIT is set; we will wakeup the parent below.
+	 */
+	p->p_flag &= ~(P_TRACED | P_PPWAIT);
+	p->p_sigignore = ~0;
+	p->p_siglist = 0;
+
+	/* Drop pending signals on the exiting thread and cancel the
+	 * interval timer (keyed by pid here -- note the untimeout cookie). */
+	ut->uu_siglist = 0;
+	untimeout(realitexpire, (caddr_t)p->p_pid);
+
+	p->p_xstat = rv;		/* save exit status for wait() */
+
+	vproc_exit(p);
+	return(0);
+}
+
+/*
+ * vproc_exit -- second half of the vfork() exit path (the analogue of
+ * proc_exit() for a vfork()ed child).  Releases the process's BSD
+ * resources: open-file table, session/controlling tty, children
+ * (reparented to initproc; traced ones are killed), final rusage,
+ * statistics, signal actions, resource limits, and profiling buffers.
+ * It then notifies the parent with siginfo + SIGCHLD and leaves the
+ * proc as a zombie for wait() to reap.
+ *
+ * NOTE(review): locals i, s, and funnel_state appear unused here, and
+ * task is only read inside the #ifdef FIXME block -- presumably kept
+ * to mirror proc_exit(); confirm before removing.
+ */
+void
+vproc_exit(struct proc *p)
+{
+	register struct proc *q, *nq, *pp;
+	struct task *task = p->task;
+	register int i,s;
+	boolean_t funnel_state;
+
+	/* Zombie's final rusage lives in M_ZOMBIE storage until wait() frees it. */
+	MALLOC_ZONE(p->p_ru, struct rusage *,
+		sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
+
+	/*
+	 * Close open files and release open-file table.
+	 * This may block!
+	 */
+	fdfree(p);
+
+	if (SESS_LEADER(p)) {
+		register struct session *sp = p->p_session;
+
+		if (sp->s_ttyvp) {
+			struct vnode *ttyvp;
+
+			/*
+			 * Controlling process.
+			 * Signal foreground pgrp,
+			 * drain controlling terminal
+			 * and revoke access to controlling terminal.
+			 */
+			if (sp->s_ttyp->t_session == sp) {
+				if (sp->s_ttyp->t_pgrp)
+					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
+				(void) ttywait(sp->s_ttyp);
+				/*
+				 * The tty could have been revoked
+				 * if we blocked.
+				 */
+				if (sp->s_ttyvp)
+					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
+			}
+			/* Clear s_ttyvp before the (possibly blocking) vrele
+			 * so no one sees a half-released tty vnode. */
+			ttyvp = sp->s_ttyvp;
+			sp->s_ttyvp = NULL;
+			if (ttyvp)
+				vrele(ttyvp);
+			/*
+			 * s_ttyp is not zero'd; we use this to indicate
+			 * that the session once had a controlling terminal.
+			 * (for logging and informational purposes)
+			 */
+		}
+		sp->s_leader = NULL;
+	}
+
+	fixjobc(p, p->p_pgrp, 0);
+	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+
+#if KTRACE
+	/*
+	 * release trace file
+	 */
+	p->p_traceflag = 0;	/* don't trace the vrele() */
+	if (p->p_tracep) {
+		struct vnode *tvp = p->p_tracep;
+		p->p_tracep = NULL;
+
+		if (UBCINFOEXISTS(tvp))
+			ubc_rele(tvp);
+		vrele(tvp);
+	}
+#endif
+
+	q = p->p_children.lh_first;
+	if (q)		/* only need this if any child is S_ZOMB */
+		wakeup((caddr_t) initproc);
+	for (; q != 0; q = nq) {
+		nq = q->p_sibling.le_next;
+		proc_reparent(q, initproc);
+		/*
+		 * Traced processes are killed
+		 * since their existence means someone is messing up.
+		 */
+		if (q->p_flag & P_TRACED) {
+			q->p_flag &= ~P_TRACED;
+			if (q->sigwait_thread) {
+				/*
+				 * The sigwait_thread could be stopped at a
+				 * breakpoint. Wake it up to kill.
+				 * Need to do this as it could be a thread which is not
+				 * the first thread in the task. So any attempts to kill
+				 * the process would result into a deadlock on q->sigwait.
+				 */
+				thread_resume((thread_act_t)q->sigwait_thread);
+				clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
+				threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
+			}
+			psignal(q, SIGKILL);
+		}
+	}
+
+	/*
+	 * Save exit status and final rusage info, adding in child rusage
+	 * info and self times.
+	 */
+	*p->p_ru = p->p_stats->p_ru;
+
+	timerclear(&p->p_ru->ru_utime);
+	timerclear(&p->p_ru->ru_stime);
+
+#ifdef FIXME
+	/* Fold the Mach task's user/system times into the rusage --
+	 * currently compiled out (see FIXME), so the times above stay zero. */
+	if (task) {
+		task_basic_info_data_t tinfo;
+		task_thread_times_info_data_t ttimesinfo;
+		int task_info_stuff, task_ttimes_stuff;
+		struct timeval ut,st;
+
+		task_info_stuff	= TASK_BASIC_INFO_COUNT;
+		task_info(task, TASK_BASIC_INFO,
+			  &tinfo, &task_info_stuff);
+		p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
+		p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
+		p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
+		p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
+
+		task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
+		task_info(task, TASK_THREAD_TIMES_INFO,
+			  &ttimesinfo, &task_ttimes_stuff);
+
+		ut.tv_sec = ttimesinfo.user_time.seconds;
+		ut.tv_usec = ttimesinfo.user_time.microseconds;
+		st.tv_sec = ttimesinfo.system_time.seconds;
+		st.tv_usec = ttimesinfo.system_time.microseconds;
+		timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
+		timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
+	}
+#endif /* FIXME */
+
+	ruadd(p->p_ru, &p->p_stats->p_cru);
+
+	/*
+	 * Free up profiling buffers.
+	 */
+	{
+		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
+
+		/* Detach the chain first, then free each link. */
+		p1 = p0->pr_next;
+		p0->pr_next = NULL;
+		p0->pr_scale = 0;
+
+		for (; p1 != NULL; p1 = pn) {
+			pn = p1->pr_next;
+			kfree((vm_offset_t)p1, sizeof *p1);
+		}
+	}
+
+	/*
+	 * Other substructures are freed from wait().
+	 */
+	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
+	p->p_stats = NULL;
+
+	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
+	p->p_sigacts = NULL;
+
+	if (--p->p_limit->p_refcnt == 0)
+		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
+	p->p_limit = NULL;
+
+	/*
+	 * Finish up by terminating the task
+	 * and halt this thread (only if a
+	 * member of the task exiting).
+	 */
+	p->task = TASK_NULL;
+
+	/*
+	 * Notify parent that we're gone.  Fill in siginfo for the parent's
+	 * wait(); skipped when the parent is initproc -- presumably init
+	 * does not consume siginfo (TODO confirm, mirrors proc_exit()).
+	 */
+	pp = p->p_pptr;
+	if (pp != initproc) {
+		pp->si_pid = p->p_pid;
+		pp->si_status = p->p_xstat;
+		pp->si_code = CLD_EXITED;
+		pp->si_uid = p->p_cred->p_ruid;
+	}
+	psignal(p->p_pptr, SIGCHLD);
+
+	/* mark as a zombie */
+	p->p_stat = SZOMB;
+
+	/* and now wakeup the parent */
+	wakeup((caddr_t)p->p_pptr);
+}