/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)kern_exit.c    8.7 (Berkeley) 2/12/94
 */

#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#if KTRACE
#include <sys/ktrace.h>
#endif

extern char init_task_failure_data[];
int exit1 __P((struct proc *, int, int *));

/*
 * exit --
 *    Death of process.
 */
struct exit_args {
    int rval;
};
void
exit(p, uap, retval)
    struct proc *p;
    struct exit_args *uap;
    int *retval;
{
    exit1(p, W_EXITCODE(uap->rval, 0), retval);

    /* drop funnel before we return */
    thread_funnel_set(kernel_flock, FALSE);
    thread_exception_return();
    /* NOTREACHED */
    while (TRUE)
        thread_block(THREAD_CONTINUE_NULL);
    /* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
int
exit1(p, rv, retval)
    register struct proc *p;
    int rv;
    int *retval;
{
    register struct proc *q, *nq;
    thread_t self = current_thread();
    thread_act_t th_act_self = current_act();
    struct task *task = p->task;
    register int i, s;
    struct uthread *ut;

    /*
     * If a thread in this task has already
     * called exit(), then halt any others
     * right here.
     */

    ut = get_bsdthread_info(th_act_self);
    if (ut->uu_flag & P_VFORK) {
        (void)vfork_exit(p, rv);
        vfork_return(th_act_self, p->p_pptr, p, retval);
        unix_syscall_return(0);
        /* NOT REACHED */
    }
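    /*
     * Elect a single exiting thread: the first thread to take the signal
     * lock claims p->exit_thread via sig_lock_to_exit().  Any other thread
     * of this task that gets here is terminated below instead of
     * continuing with the exit.
     */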
    signal_lock(p);
    while (p->exit_thread != self) {
        if (sig_try_locked(p) <= 0) {
            if (get_threadtask(th_act_self) != task) {
                signal_unlock(p);
                return(0);
            }
            signal_unlock(p);
            thread_terminate(th_act_self);
            thread_funnel_set(kernel_flock, FALSE);
            thread_exception_return();
            /* NOTREACHED */
        }
        sig_lock_to_exit(p);
    }
    signal_unlock(p);
    if (p->p_pid == 1) {
        printf("pid 1 exited (signal %d, exit %d)",
            WTERMSIG(rv), WEXITSTATUS(rv));
        panic("init died\nState at Last Exception:\n\n%s",
            init_task_failure_data);
    }

    s = splsched();
    p->p_flag |= P_WEXIT;
    splx(s);
    (void)proc_prepareexit(p);
    p->p_xstat = rv;

    /* task terminate will call proc_terminate and that cleans it up */
    task_terminate_internal(task);

    /*
     * We come back and return to the AST, which
     * should clean up the rest.
     */
#if 0
    if (task == current_task()) {
        thread_exception_return();
        /*NOTREACHED*/
    }

    while (task == current_task()) {
        thread_terminate_self();
        /*NOTREACHED*/
    }
#endif
    return(0);
}

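/*
 * proc_prepareexit() pulls the proc off the allproc and pid hash lists and
 * quiesces its signal and timer state.  It runs once per process, either
 * from exit1() above or from proc_exit() when the task is torn down without
 * going through exit1().
 */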
void
proc_prepareexit(struct proc *p)
{
    int s;
    struct uthread *ut;
    thread_t self = current_thread();
    thread_act_t th_act_self = current_act();

    /*
     * Remove proc from allproc queue and from pidhash chain.
     * Need to do this before we do anything that can block.
     * Not doing so would let things like mount() find this proc
     * on allproc in a partially cleaned-up state.
     */
    LIST_REMOVE(p, p_list);
    LIST_REMOVE(p, p_hash);

#ifdef PGINPROF
    vmsizmon();
#endif
    /*
     * If parent is waiting for us to exit or exec,
     * P_PPWAIT is set; we will wakeup the parent below.
     */
    p->p_flag &= ~(P_TRACED | P_PPWAIT);
    p->p_sigignore = ~0;
    p->p_siglist = 0;
    ut = get_bsdthread_info(th_act_self);
    ut->uu_siglist = 0;
    untimeout(realitexpire, (caddr_t)p->p_pid);
}

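/*
 * proc_exit() finishes the BSD side of process teardown: it releases open
 * files and SysV IPC, detaches the controlling terminal, reparents children
 * to init, folds the Mach task times into the final rusage, and leaves the
 * proc on zombproc for wait() to reap.  It is reached from task termination.
 */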
void
proc_exit(struct proc *p)
{
    register struct proc *q, *nq, *pp;
    thread_t self = current_thread();
    thread_act_t th_act_self = current_act();
    struct task *task = p->task;
    register int i, s;
    boolean_t funnel_state;

    /*
     * P_WEXIT may not be set yet if we got here via thread_terminate()
     * of the lone thread of the process, so exit1() never ran; do the
     * preparation here in that case.
     */
    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    if (!(p->p_flag & P_WEXIT)) {
        s = splsched();
        p->p_flag |= P_WEXIT;
        splx(s);
        proc_prepareexit(p);
    }

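    /*
     * Allocate the rusage record that will outlive the proc's other
     * substructures; it is freed by the parent in wait1() once the
     * zombie is reaped.
     */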
    MALLOC_ZONE(p->p_ru, struct rusage *,
        sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

    /*
     * Close open files and release open-file table.
     * This may block!
     */
    fdfree(p);

    /* Close ref SYSV Shared memory */
    if (p->vm_shm)
        shmexit(p);
    /* Release SYSV semaphores */
    semexit(p);

    if (SESS_LEADER(p)) {
        register struct session *sp = p->p_session;

        if (sp->s_ttyvp) {
            struct vnode *ttyvp;

            /*
             * Controlling process.
             * Signal foreground pgrp,
             * drain controlling terminal
             * and revoke access to controlling terminal.
             */
            if (sp->s_ttyp->t_session == sp) {
                if (sp->s_ttyp->t_pgrp)
                    pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
                (void) ttywait(sp->s_ttyp);
                /*
                 * The tty could have been revoked
                 * if we blocked.
                 */
                if (sp->s_ttyvp)
                    VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
            }
            ttyvp = sp->s_ttyvp;
            sp->s_ttyvp = NULL;
            if (ttyvp)
                vrele(ttyvp);
            /*
             * s_ttyp is not zero'd; we use this to indicate
             * that the session once had a controlling terminal.
             * (for logging and informational purposes)
             */
        }
        sp->s_leader = NULL;
    }

    fixjobc(p, p->p_pgrp, 0);
    p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
    (void)acct_process(p);

#if KTRACE
    /*
     * release trace file
     */
    p->p_traceflag = 0;    /* don't trace the vrele() */
    if (p->p_tracep) {
        struct vnode *tvp = p->p_tracep;
        p->p_tracep = NULL;
        vrele(tvp);
    }
#endif

    q = p->p_children.lh_first;
    if (q)        /* only need this if any child is S_ZOMB */
        wakeup((caddr_t) initproc);
    for (; q != 0; q = nq) {
        nq = q->p_sibling.le_next;
        proc_reparent(q, initproc);
        /*
         * Traced processes are killed
         * since their existence means someone is messing up.
         */
        if (q->p_flag & P_TRACED) {
            q->p_flag &= ~P_TRACED;
            if (q->sigwait_thread) {
                thread_t sig_shuttle;

                sig_shuttle = (thread_t)getshuttle_thread((thread_act_t)q->sigwait_thread);
                /*
                 * The sigwait_thread could be stopped at a
                 * breakpoint.  Wake it up so it can be killed.
                 * This is needed because it may not be the first
                 * thread in the task, in which case any attempt
                 * to kill the process would deadlock on q->sigwait.
                 */
                thread_resume((thread_act_t)q->sigwait_thread);
                clear_wait(sig_shuttle, THREAD_INTERRUPTED);
                threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
            }
            psignal(q, SIGKILL);
        }
    }

    /*
     * Save exit status and final rusage info, adding in child rusage
     * info and self times.
     */
    *p->p_ru = p->p_stats->p_ru;

    timerclear(&p->p_ru->ru_utime);
    timerclear(&p->p_ru->ru_stime);

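    /*
     * Ask Mach for the task's accumulated CPU time: TASK_BASIC_INFO covers
     * threads that have already terminated, TASK_THREAD_TIMES_INFO covers
     * the still-live threads, and both are folded into the final rusage.
     */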
    if (task) {
        task_basic_info_data_t tinfo;
        task_thread_times_info_data_t ttimesinfo;
        int task_info_stuff, task_ttimes_stuff;
        struct timeval ut, st;

        task_info_stuff = TASK_BASIC_INFO_COUNT;
        task_info(task, TASK_BASIC_INFO,
            &tinfo, &task_info_stuff);
        p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
        p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
        p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
        p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

        task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
        task_info(task, TASK_THREAD_TIMES_INFO,
            &ttimesinfo, &task_ttimes_stuff);

        ut.tv_sec = ttimesinfo.user_time.seconds;
        ut.tv_usec = ttimesinfo.user_time.microseconds;
        st.tv_sec = ttimesinfo.system_time.seconds;
        st.tv_usec = ttimesinfo.system_time.microseconds;
        timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
        timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
    }

    ruadd(p->p_ru, &p->p_stats->p_cru);

    /*
     * Free up profiling buffers.
     */
    {
        struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

        p1 = p0->pr_next;
        p0->pr_next = NULL;
        p0->pr_scale = 0;

        for (; p1 != NULL; p1 = pn) {
            pn = p1->pr_next;
            kfree((vm_offset_t)p1, sizeof *p1);
        }
    }

    /*
     * Other substructures are freed from wait().
     */
    FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
    p->p_stats = NULL;

    FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
    p->p_sigacts = NULL;

    if (--p->p_limit->p_refcnt == 0)
        FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
    p->p_limit = NULL;

    /*
     * Finish up by terminating the task and halting this thread
     * (only if this thread is a member of the exiting task).
     */
    p->task = TASK_NULL;
    //task->proc = NULL;
    set_bsdtask_info(task, NULL);

    /*
     * Notify parent that we're gone.
     */
    if (p->p_pptr->p_flag & P_NOCLDWAIT) {
        struct proc *pp = p->p_pptr;

        proc_reparent(p, initproc);
        /* If there are no more children wakeup parent */
        if (LIST_EMPTY(&pp->p_children))
            wakeup((caddr_t)pp);
    }
    /* should be fine as parent proc would be initproc */
    pp = p->p_pptr;
    if (pp != initproc) {
        pp->si_pid = p->p_pid;
        pp->si_status = p->p_xstat;
        pp->si_code = CLD_EXITED;
        pp->si_uid = p->p_cred->p_ruid;
    }
    psignal(pp, SIGCHLD);

    /* Place onto zombproc. */
    LIST_INSERT_HEAD(&zombproc, p, p_list);
    p->p_stat = SZOMB;

    /* and now wakeup the parent */
    wakeup((caddr_t)p->p_pptr);

    (void) thread_funnel_set(kernel_flock, funnel_state);
}

struct wait4_args {
    int pid;
    int *status;
    int options;
    struct rusage *rusage;
};

#if COMPAT_43
int
owait(p, uap, retval)
    struct proc *p;
    void *uap;
    int *retval;
{
    struct wait4_args *a;

    a = (struct wait4_args *)get_bsduthreadarg(current_act());

    a->options = 0;
    a->rusage = NULL;
    a->pid = WAIT_ANY;
    a->status = NULL;
    return (wait1(p, a, retval, 1));
}

int
wait4(p, uap, retval)
    struct proc *p;
    struct wait4_args *uap;
    int *retval;
{
    return (wait1(p, uap, retval, 0));
}

struct owait3_args {
    int *status;
    int options;
    struct rusage *rusage;
};

int
owait3(p, uap, retval)
    struct proc *p;
    struct owait3_args *uap;
    int *retval;
{
    struct wait4_args *a;

    a = (struct wait4_args *)get_bsduthreadarg(current_act());

    a->rusage = uap->rusage;
    a->options = uap->options;
    a->status = uap->status;
    a->pid = WAIT_ANY;

    return (wait1(p, a, retval, 1));
}

#else
#define wait1 wait4
#endif

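/*
 * wait1continue() is the continuation passed to tsleep0() below: when the
 * sleeping wait is woken it resumes here rather than returning through the
 * original stack frame, so the syscall arguments and return-value slots are
 * re-fetched from the current uthread before retrying wait1().
 */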
int
wait1continue(result)
    int result;
{
    void *vt;
    thread_act_t thread;
    int *retval;
    struct proc *p;

    if (result)
        return(result);

    p = current_proc();
    thread = current_act();
    vt = (void *)get_bsduthreadarg(thread);
    retval = (int *)get_bsduthreadrval(thread);
    return (wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0));
}

int
wait1(q, uap, retval, compat)
    register struct proc *q;
    register struct wait4_args *uap;
    register_t *retval;
#if COMPAT_43
    int compat;
#endif
{
    register int nfound;
    register struct proc *p, *t;
    int status, error;
    struct vnode *tvp;

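    /*
     * A pid argument of 0 means "any child in our process group" and is
     * rewritten below as -p_pgid; WAIT_ANY matches any child, and other
     * negative values select a specific process group.
     */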
retry:
    if (uap->pid == 0)
        uap->pid = -q->p_pgid;

loop:
    nfound = 0;
    for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
        if (uap->pid != WAIT_ANY &&
            p->p_pid != uap->pid &&
            p->p_pgid != -(uap->pid))
            continue;
        nfound++;
        if (p->p_flag & P_WAITING) {
            (void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
            goto loop;
        }
        p->p_flag |= P_WAITING;    /* only allow single thread to wait() */

        if (p->p_stat == SZOMB) {
            retval[0] = p->p_pid;
#if COMPAT_43
            if (compat)
                retval[1] = p->p_xstat;
            else
#endif
            if (uap->status) {
                status = p->p_xstat;    /* convert to int */
                if (error = copyout((caddr_t)&status,
                        (caddr_t)uap->status,
                        sizeof(status))) {
                    p->p_flag &= ~P_WAITING;
                    wakeup(&p->p_stat);
                    return (error);
                }
            }
            if (uap->rusage &&
                (error = copyout((caddr_t)p->p_ru,
                    (caddr_t)uap->rusage,
                    sizeof (struct rusage)))) {
                p->p_flag &= ~P_WAITING;
                wakeup(&p->p_stat);
                return (error);
            }
            /*
             * If we got the child via a ptrace 'attach',
             * we need to give it back to the old parent.
             */
            if (p->p_oppid && (t = pfind(p->p_oppid))) {
                p->p_oppid = 0;
                proc_reparent(p, t);
                if (t != initproc) {
                    t->si_pid = p->p_pid;
                    t->si_status = p->p_xstat;
                    t->si_code = CLD_CONTINUED;
                    t->si_uid = p->p_cred->p_ruid;
                }
                psignal(t, SIGCHLD);
                wakeup((caddr_t)t);
                p->p_flag &= ~P_WAITING;
                wakeup(&p->p_stat);
                return (0);
            }
            p->p_xstat = 0;
            if (p->p_ru) {
                ruadd(&q->p_stats->p_cru, p->p_ru);
                FREE_ZONE(p->p_ru, sizeof *p->p_ru, M_ZOMBIE);
                p->p_ru = NULL;
            } else {
                printf("Warning : lost p_ru for %s\n", p->p_comm);
            }

            /*
             * Decrement the count of procs running with this uid.
             */
            (void)chgproccnt(p->p_cred->p_ruid, -1);

            /*
             * Free up credentials.
             */
            if (--p->p_cred->p_refcnt == 0) {
                struct ucred *ucr = p->p_ucred;
                struct pcred *pcr;

                if (ucr != NOCRED) {
                    p->p_ucred = NOCRED;
                    crfree(ucr);
                }
                pcr = p->p_cred;
                p->p_cred = NULL;
                FREE_ZONE(pcr, sizeof *pcr, M_SUBPROC);
            }

            /*
             * Release reference to text vnode
             */
            tvp = p->p_textvp;
            p->p_textvp = NULL;
            if (tvp)
                vrele(tvp);

            /*
             * Finally finished with old proc entry.
             * Unlink it from its process group and free it.
             */
            leavepgrp(p);
            LIST_REMOVE(p, p_list);    /* off zombproc */
            LIST_REMOVE(p, p_sibling);
            p->p_flag &= ~P_WAITING;
            FREE_ZONE(p, sizeof *p, M_PROC);
            nprocs--;
            wakeup(&p->p_stat);
            return (0);
        }
        if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
            (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
            p->p_flag |= P_WAITED;
            retval[0] = p->p_pid;
#if COMPAT_43
            if (compat) {
                retval[1] = W_STOPCODE(p->p_xstat);
                error = 0;
            } else
#endif
            if (uap->status) {
                status = W_STOPCODE(p->p_xstat);
                error = copyout((caddr_t)&status,
                    (caddr_t)uap->status,
                    sizeof(status));
            } else
                error = 0;
            p->p_flag &= ~P_WAITING;
            wakeup(&p->p_stat);
            return (error);
        }
        p->p_flag &= ~P_WAITING;
        wakeup(&p->p_stat);
    }
    if (nfound == 0)
        return (ECHILD);

    if (uap->options & WNOHANG) {
        retval[0] = 0;
        return (0);
    }

    if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue))
        return (error);

    goto loop;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(child, parent)
    register struct proc *child;
    register struct proc *parent;
{
    if (child->p_pptr == parent)
        return;

    LIST_REMOVE(child, p_sibling);
    LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
    child->p_pptr = parent;
}

/*
 * Make the current process an "init" process, meaning
 * that it doesn't have a parent, and that it won't be
 * gunned down by kill(-1, 0).
 */
kern_return_t
init_process(void)
{
    register struct proc *p = current_proc();

    if (suser(p->p_ucred, &p->p_acflag))
        return(KERN_NO_ACCESS);

    if (p->p_pid != 1 && p->p_pgid != p->p_pid)
        enterpgrp(p, p->p_pid, 0);
    p->p_flag |= P_SYSTEM;

    /*
     * Take us out of the sibling chain, and
     * out of our parent's child chain.
     */
    LIST_REMOVE(p, p_sibling);
    p->p_sibling.le_prev = NULL;
    p->p_sibling.le_next = NULL;
    p->p_pptr = kernproc;

    return(KERN_SUCCESS);
}

void
process_terminate_self(void)
{
    struct proc *p = current_proc();

    if (p != NULL) {
        exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
        /*NOTREACHED*/
    }
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */

void
vfork_exit(p, rv)
    struct proc *p;
    int rv;
{
    register struct proc *q, *nq;
    thread_t self = current_thread();
    thread_act_t th_act_self = current_act();
    struct task *task = p->task;
    register int i, s;
    struct uthread *ut;

    /*
     * If a thread in this task has already
     * called exit(), then halt any others
     * right here.
     */

    ut = get_bsdthread_info(th_act_self);
#ifdef FIXME
    signal_lock(p);
    while (p->exit_thread != self) {
        if (sig_try_locked(p) <= 0) {
            if (get_threadtask(th_act_self) != task) {
                signal_unlock(p);
                return;
            }
            signal_unlock(p);
            thread_terminate(th_act_self);
            thread_funnel_set(kernel_flock, FALSE);
            thread_exception_return();
            /* NOTREACHED */
        }
        sig_lock_to_exit(p);
    }
    signal_unlock(p);
    if (p->p_pid == 1) {
        printf("pid 1 exited (signal %d, exit %d)",
            WTERMSIG(rv), WEXITSTATUS(rv));
        panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data);
    }
#endif /* FIXME */

    s = splsched();
    p->p_flag |= P_WEXIT;
    splx(s);
    /*
     * Remove proc from allproc queue and from pidhash chain.
     * Need to do this before we do anything that can block.
     * Not doing so would let things like mount() find this proc
     * on allproc in a partially cleaned-up state.
     */
    LIST_REMOVE(p, p_list);
    LIST_REMOVE(p, p_hash);
    /*
     * If parent is waiting for us to exit or exec,
     * P_PPWAIT is set; we will wakeup the parent below.
     */
    p->p_flag &= ~(P_TRACED | P_PPWAIT);
    p->p_sigignore = ~0;
    p->p_siglist = 0;

    ut->uu_siglist = 0;
    untimeout(realitexpire, (caddr_t)p->p_pid);

    p->p_xstat = rv;

    (void)vproc_exit(p);
}

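/*
 * vproc_exit() handles exit for a vfork()ed child, which has been running
 * on its parent's thread and task; the Mach task teardown is skipped here,
 * but the usual BSD cleanup (files, controlling tty, reparenting children,
 * rusage, moving to zombproc) is still performed.
 */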
void
vproc_exit(struct proc *p)
{
    register struct proc *q, *nq, *pp;
    thread_t self = current_thread();
    thread_act_t th_act_self = current_act();
    struct task *task = p->task;
    register int i, s;
    boolean_t funnel_state;

    MALLOC_ZONE(p->p_ru, struct rusage *,
        sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

    /*
     * Close open files and release open-file table.
     * This may block!
     */
    fdfree(p);

    if (SESS_LEADER(p)) {
        register struct session *sp = p->p_session;

        if (sp->s_ttyvp) {
            struct vnode *ttyvp;

            /*
             * Controlling process.
             * Signal foreground pgrp,
             * drain controlling terminal
             * and revoke access to controlling terminal.
             */
            if (sp->s_ttyp->t_session == sp) {
                if (sp->s_ttyp->t_pgrp)
                    pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
                (void) ttywait(sp->s_ttyp);
                /*
                 * The tty could have been revoked
                 * if we blocked.
                 */
                if (sp->s_ttyvp)
                    VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
            }
            ttyvp = sp->s_ttyvp;
            sp->s_ttyvp = NULL;
            if (ttyvp)
                vrele(ttyvp);
            /*
             * s_ttyp is not zero'd; we use this to indicate
             * that the session once had a controlling terminal.
             * (for logging and informational purposes)
             */
        }
        sp->s_leader = NULL;
    }

    fixjobc(p, p->p_pgrp, 0);
    p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

#if KTRACE
    /*
     * release trace file
     */
    p->p_traceflag = 0;    /* don't trace the vrele() */
    if (p->p_tracep) {
        struct vnode *tvp = p->p_tracep;
        p->p_tracep = NULL;
        vrele(tvp);
    }
#endif

    q = p->p_children.lh_first;
    if (q)        /* only need this if any child is S_ZOMB */
        wakeup((caddr_t) initproc);
    for (; q != 0; q = nq) {
        nq = q->p_sibling.le_next;
        proc_reparent(q, initproc);
        /*
         * Traced processes are killed
         * since their existence means someone is messing up.
         */
        if (q->p_flag & P_TRACED) {
            q->p_flag &= ~P_TRACED;
            if (q->sigwait_thread) {
                thread_t sig_shuttle;

                sig_shuttle = (thread_t)getshuttle_thread((thread_act_t)q->sigwait_thread);
                /*
                 * The sigwait_thread could be stopped at a
                 * breakpoint.  Wake it up so it can be killed.
                 * This is needed because it may not be the first
                 * thread in the task, in which case any attempt
                 * to kill the process would deadlock on q->sigwait.
                 */
                thread_resume((thread_act_t)q->sigwait_thread);
                clear_wait(sig_shuttle, THREAD_INTERRUPTED);
                threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
            }
            psignal(q, SIGKILL);
        }
    }

    /*
     * Save exit status and final rusage info, adding in child rusage
     * info and self times.
     */
    *p->p_ru = p->p_stats->p_ru;

    timerclear(&p->p_ru->ru_utime);
    timerclear(&p->p_ru->ru_stime);

#ifdef FIXME
    if (task) {
        task_basic_info_data_t tinfo;
        task_thread_times_info_data_t ttimesinfo;
        int task_info_stuff, task_ttimes_stuff;
        struct timeval ut, st;

        task_info_stuff = TASK_BASIC_INFO_COUNT;
        task_info(task, TASK_BASIC_INFO,
            &tinfo, &task_info_stuff);
        p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
        p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
        p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
        p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

        task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
        task_info(task, TASK_THREAD_TIMES_INFO,
            &ttimesinfo, &task_ttimes_stuff);

        ut.tv_sec = ttimesinfo.user_time.seconds;
        ut.tv_usec = ttimesinfo.user_time.microseconds;
        st.tv_sec = ttimesinfo.system_time.seconds;
        st.tv_usec = ttimesinfo.system_time.microseconds;
        timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
        timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
    }
#endif /* FIXME */

    ruadd(p->p_ru, &p->p_stats->p_cru);

    /*
     * Free up profiling buffers.
     */
    {
        struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

        p1 = p0->pr_next;
        p0->pr_next = NULL;
        p0->pr_scale = 0;

        for (; p1 != NULL; p1 = pn) {
            pn = p1->pr_next;
            kfree((vm_offset_t)p1, sizeof *p1);
        }
    }

    /*
     * Other substructures are freed from wait().
     */
    FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
    p->p_stats = NULL;

    FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
    p->p_sigacts = NULL;

    if (--p->p_limit->p_refcnt == 0)
        FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
    p->p_limit = NULL;

    /*
     * Finish up by terminating the task and halting this thread
     * (only if this thread is a member of the exiting task).
     */
    p->task = TASK_NULL;

    /*
     * Notify parent that we're gone.
     */
    pp = p->p_pptr;
    if (pp != initproc) {
        pp->si_pid = p->p_pid;
        pp->si_status = p->p_xstat;
        pp->si_code = CLD_EXITED;
        pp->si_uid = p->p_cred->p_ruid;
    }
    psignal(p->p_pptr, SIGCHLD);

    /* Place onto zombproc. */
    LIST_INSERT_HEAD(&zombproc, p, p_list);
    p->p_stat = SZOMB;

    /* and now wakeup the parent */
    wakeup((caddr_t)p->p_pptr);
}