2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
22 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
24 * Copyright (c) 1982, 1986, 1989, 1991, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
63 #include <machine/reg.h>
64 #include <machine/psl.h>
66 #include "compat_43.h"
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/ioctl.h>
74 #include <sys/resource.h>
75 #include <sys/kernel.h>
79 #include <sys/vnode.h>
80 #include <sys/syslog.h>
81 #include <sys/malloc.h>
82 #include <sys/resourcevar.h>
83 #include <sys/ptrace.h>
86 #include <mach/mach_types.h>
87 #include <kern/thread.h>
88 #include <kern/thread_act.h>
89 #include <kern/assert.h>
91 extern char init_task_failure_data
[];
92 int exit1
__P((struct proc
*, int, int *));
104 struct exit_args
*uap
;
107 exit1(p
, W_EXITCODE(uap
->rval
, 0), retval
);
109 /* drop funnel before we return */
110 thread_funnel_set(kernel_flock
, FALSE
);
111 thread_exception_return();
119 * Exit: deallocate address space and other resources, change proc state
120 * to zombie, and unlink proc from allproc and parent's lists. Save exit
121 * status and rusage for wait(). Check for child processes and orphan them.
125 register struct proc
*p
;
129 register struct proc
*q
, *nq
;
130 thread_t self
= current_thread();
131 thread_act_t th_act_self
= current_act();
132 struct task
*task
= p
->task
;
137 * If a thread in this task has already
138 * called exit(), then halt any others
142 ut
= get_bsdthread_info(th_act_self
);
143 if (ut
->uu_flag
& P_VFORK
) {
145 vfork_return(th_act_self
, p
->p_pptr
, p
, retval
);
146 unix_syscall_return(0);
150 while (p
->exit_thread
!= self
) {
151 if (sig_try_locked(p
) <= 0) {
152 if (get_threadtask(th_act_self
) != task
) {
157 thread_terminate(th_act_self
);
158 thread_funnel_set(kernel_flock
, FALSE
);
159 thread_exception_return();
166 printf("pid 1 exited (signal %d, exit %d)",
167 WTERMSIG(rv
), WEXITSTATUS(rv
));
168 panic("init died\nState at Last Exception:\n\n%s",
169 init_task_failure_data
);
173 p
->p_flag
|= P_WEXIT
;
178 /* task terminate will call proc_terminate and that cleans it up */
179 task_terminate_internal(task
);
182 * we come back and return to the AST, which
183 * should clean up the rest
186 if (task
== current_task()) {
187 thread_exception_return();
191 while (task
== current_task()) {
192 thread_terminate_self();
200 proc_prepareexit(struct proc
*p
)
204 thread_t self
= current_thread();
205 thread_act_t th_act_self
= current_act();
209 * Remove proc from allproc queue and from pidhash chain.
210 * Need to do this before we do anything that can block.
211 * Not doing so causes things like mount() to find this on allproc
212 * in partially cleaned state.
214 LIST_REMOVE(p
, p_list
);
215 LIST_REMOVE(p
, p_hash
);
221 * If parent is waiting for us to exit or exec,
222 * P_PPWAIT is set; we will wakeup the parent below.
224 p
->p_flag
&= ~(P_TRACED
| P_PPWAIT
);
227 ut
= get_bsdthread_info(th_act_self
);
229 untimeout(realitexpire
, (caddr_t
)p
);
234 proc_exit(struct proc
*p
)
236 register struct proc
*q
, *nq
;
237 thread_t self
= current_thread();
238 thread_act_t th_act_self
= current_act();
239 struct task
*task
= p
->task
;
242 boolean_t funnel_state
;
244 /* This can happen if thread_terminate of the single thread
248 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
249 if( !(p
->p_flag
& P_WEXIT
)) {
251 p
->p_flag
|= P_WEXIT
;
256 MALLOC_ZONE(p
->p_ru
, struct rusage
*,
257 sizeof (*p
->p_ru
), M_ZOMBIE
, M_WAITOK
);
260 * Close open files and release open-file table.
265 /* Close ref SYSV Shared memory*/
269 if (SESS_LEADER(p
)) {
270 register struct session
*sp
= p
->p_session
;
274 * Controlling process.
275 * Signal foreground pgrp,
276 * drain controlling terminal
277 * and revoke access to controlling terminal.
279 if (sp
->s_ttyp
->t_session
== sp
) {
280 if (sp
->s_ttyp
->t_pgrp
)
281 pgsignal(sp
->s_ttyp
->t_pgrp
, SIGHUP
, 1);
282 (void) ttywait(sp
->s_ttyp
);
284 * The tty could have been revoked
288 VOP_REVOKE(sp
->s_ttyvp
, REVOKEALL
);
294 * s_ttyp is not zero'd; we use this to indicate
295 * that the session once had a controlling terminal.
296 * (for logging and informational purposes)
302 fixjobc(p
, p
->p_pgrp
, 0);
303 p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
= RLIM_INFINITY
;
308 p
->p_traceflag
= 0; /* don't trace the vrele() */
314 q
= p
->p_children
.lh_first
;
315 if (q
) /* only need this if any child is S_ZOMB */
316 wakeup((caddr_t
) initproc
);
317 for (; q
!= 0; q
= nq
) {
318 nq
= q
->p_sibling
.le_next
;
319 proc_reparent(q
, initproc
);
321 * Traced processes are killed
322 * since their existence means someone is messing up.
324 if (q
->p_flag
& P_TRACED
) {
325 q
->p_flag
&= ~P_TRACED
;
326 if (q
->sigwait_thread
) {
327 thread_t sig_shuttle
= getshuttle_thread(q
->sigwait_thread
);
329 * The sigwait_thread could be stopped at a
330 * breakpoint. Wake it up to kill.
331 * Need to do this as it could be a thread which is not
332 * the first thread in the task. So any attempts to kill
333 * the process would result in a deadlock on q->sigwait.
335 thread_resume((struct thread
*)q
->sigwait_thread
);
336 clear_wait(sig_shuttle
, THREAD_INTERRUPTED
);
337 threadsignal(q
->sigwait_thread
, SIGKILL
, 0);
345 * Save exit status and final rusage info, adding in child rusage
346 * info and self times.
348 *p
->p_ru
= p
->p_stats
->p_ru
;
350 timerclear(&p
->p_ru
->ru_utime
);
351 timerclear(&p
->p_ru
->ru_stime
);
354 task_basic_info_data_t tinfo
;
355 task_thread_times_info_data_t ttimesinfo
;
356 int task_info_stuff
, task_ttimes_stuff
;
357 struct timeval ut
,st
;
359 task_info_stuff
= TASK_BASIC_INFO_COUNT
;
360 task_info(task
, TASK_BASIC_INFO
,
361 &tinfo
, &task_info_stuff
);
362 p
->p_ru
->ru_utime
.tv_sec
= tinfo
.user_time
.seconds
;
363 p
->p_ru
->ru_utime
.tv_usec
= tinfo
.user_time
.microseconds
;
364 p
->p_ru
->ru_stime
.tv_sec
= tinfo
.system_time
.seconds
;
365 p
->p_ru
->ru_stime
.tv_usec
= tinfo
.system_time
.microseconds
;
367 task_ttimes_stuff
= TASK_THREAD_TIMES_INFO_COUNT
;
368 task_info(task
, TASK_THREAD_TIMES_INFO
,
369 &ttimesinfo
, &task_ttimes_stuff
);
371 ut
.tv_sec
= ttimesinfo
.user_time
.seconds
;
372 ut
.tv_usec
= ttimesinfo
.user_time
.microseconds
;
373 st
.tv_sec
= ttimesinfo
.system_time
.seconds
;
374 st
.tv_usec
= ttimesinfo
.system_time
.microseconds
;
375 timeradd(&ut
,&p
->p_ru
->ru_utime
,&p
->p_ru
->ru_utime
);
376 timeradd(&st
,&p
->p_ru
->ru_stime
,&p
->p_ru
->ru_stime
);
380 ruadd(p
->p_ru
, &p
->p_stats
->p_cru
);
383 * Free up profiling buffers.
386 struct uprof
*p0
= &p
->p_stats
->p_prof
, *p1
, *pn
;
392 for (; p1
!= NULL
; p1
= pn
) {
394 kfree((vm_offset_t
)p1
, sizeof *p1
);
399 * Other substructures are freed from wait().
401 FREE_ZONE(p
->p_stats
, sizeof *p
->p_stats
, M_SUBPROC
);
404 FREE_ZONE(p
->p_sigacts
, sizeof *p
->p_sigacts
, M_SUBPROC
);
407 if (--p
->p_limit
->p_refcnt
== 0)
408 FREE_ZONE(p
->p_limit
, sizeof *p
->p_limit
, M_SUBPROC
);
412 * Finish up by terminating the task
413 * and halt this thread (only if a
414 * member of the task exiting).
418 set_bsdtask_info(task
, NULL
);
421 * Notify parent that we're gone.
423 psignal(p
->p_pptr
, SIGCHLD
);
425 /* Place onto zombproc. */
426 LIST_INSERT_HEAD(&zombproc
, p
, p_list
);
429 /* and now wakeup the parent */
430 wakeup((caddr_t
)p
->p_pptr
);
432 (void) thread_funnel_set(kernel_flock
, funnel_state
);
440 struct rusage
*rusage
;
445 owait(p
, uap
, retval
)
450 struct wait4_args
*a
;
452 a
= (struct wait4_args
*)get_bsduthreadarg(current_act());
458 return (wait1(p
, a
, retval
, 1));
462 wait4(p
, uap
, retval
)
464 struct wait4_args
*uap
;
468 return (wait1(p
, uap
, retval
, 0));
474 struct rusage
*rusage
;
478 owait3(p
, uap
, retval
)
480 struct owait3_args
*uap
;
483 struct wait4_args
*a
;
485 a
= (struct wait4_args
*)get_bsduthreadarg(current_act
);
487 a
->rusage
= uap
->rusage
;
488 a
->options
= uap
->options
;
489 a
->status
= uap
->status
;
492 return (wait1(p
, a
, retval
, 1));
500 wait1continue(result
)
509 p
->p_flag
&= ~P_WAITING
;
515 thread
= current_act();
516 ut
= get_bsdthread_info(thread
);
517 vt
= get_bsduthreadarg(thread
);
518 retval
= get_bsduthreadrval(thread
);
519 wait1((struct proc
*)p
, (struct wait4_args
*)vt
, retval
, 0);
523 wait1(q
, uap
, retval
, compat
)
524 register struct proc
*q
;
525 register struct wait4_args
*uap
;
532 register struct proc
*p
, *t
;
537 /* since we are funneled we don't need to do this atomically, yet */
538 if (q
->p_flag
& P_WAITING
) {
541 q
->p_flag
|= P_WAITING
; /* only allow single thread to wait() */
545 uap
->pid
= -q
->p_pgid
;
549 for (p
= q
->p_children
.lh_first
; p
!= 0; p
= p
->p_sibling
.le_next
) {
550 if (uap
->pid
!= WAIT_ANY
&&
551 p
->p_pid
!= uap
->pid
&&
552 p
->p_pgid
!= -(uap
->pid
))
555 if (p
->p_stat
== SZOMB
) {
556 retval
[0] = p
->p_pid
;
559 retval
[1] = p
->p_xstat
;
563 status
= p
->p_xstat
; /* convert to int */
564 if (error
= copyout((caddr_t
)&status
,
565 (caddr_t
)uap
->status
,
567 q
->p_flag
&= ~P_WAITING
;
572 (error
= copyout((caddr_t
)p
->p_ru
,
573 (caddr_t
)uap
->rusage
,
574 sizeof (struct rusage
)))) {
575 q
->p_flag
&= ~P_WAITING
;
579 * If we got the child via a ptrace 'attach',
580 * we need to give it back to the old parent.
582 if (p
->p_oppid
&& (t
= pfind(p
->p_oppid
))) {
587 q
->p_flag
&= ~P_WAITING
;
592 ruadd(&q
->p_stats
->p_cru
, p
->p_ru
);
593 FREE_ZONE(p
->p_ru
, sizeof *p
->p_ru
, M_ZOMBIE
);
596 printf("Warning : lost p_ru for %s\n", p
->p_comm
);
600 * Decrement the count of procs running with this uid.
602 (void)chgproccnt(p
->p_cred
->p_ruid
, -1);
605 * Free up credentials.
607 if (--p
->p_cred
->p_refcnt
== 0) {
608 struct ucred
*ucr
= p
->p_ucred
;
617 FREE_ZONE(pcr
, sizeof *pcr
, M_SUBPROC
);
621 * Release reference to text vnode
627 * Finally finished with old proc entry.
628 * Unlink it from its process group and free it.
631 LIST_REMOVE(p
, p_list
); /* off zombproc */
632 LIST_REMOVE(p
, p_sibling
);
633 FREE_ZONE(p
, sizeof *p
, M_PROC
);
635 q
->p_flag
&= ~P_WAITING
;
638 if (p
->p_stat
== SSTOP
&& (p
->p_flag
& P_WAITED
) == 0 &&
639 (p
->p_flag
& P_TRACED
|| uap
->options
& WUNTRACED
)) {
640 p
->p_flag
|= P_WAITED
;
641 retval
[0] = p
->p_pid
;
644 retval
[1] = W_STOPCODE(p
->p_xstat
);
649 status
= W_STOPCODE(p
->p_xstat
);
650 error
= copyout((caddr_t
)&status
,
651 (caddr_t
)uap
->status
,
655 q
->p_flag
&= ~P_WAITING
;
660 q
->p_flag
&= ~P_WAITING
;
663 if (uap
->options
& WNOHANG
) {
665 q
->p_flag
&= ~P_WAITING
;
669 if (error
= tsleep0((caddr_t
)q
, PWAIT
| PCATCH
, "wait", 0, wait1continue
)) {
670 q
->p_flag
&= ~P_WAITING
;
677 * make process 'parent' the new parent of process 'child'.
680 proc_reparent(child
, parent
)
681 register struct proc
*child
;
682 register struct proc
*parent
;
685 if (child
->p_pptr
== parent
)
688 LIST_REMOVE(child
, p_sibling
);
689 LIST_INSERT_HEAD(&parent
->p_children
, child
, p_sibling
);
690 child
->p_pptr
= parent
;
696 * Make the current process an "init" process, meaning
697 * that it doesn't have a parent, and that it won't be
698 * gunned down by kill(-1, 0).
701 register struct proc
*p
= current_proc();
703 if (suser(p
->p_ucred
, &p
->p_acflag
))
704 return(KERN_NO_ACCESS
);
706 if (p
->p_pid
!= 1 && p
->p_pgid
!= p
->p_pid
)
707 enterpgrp(p
, p
->p_pid
, 0);
708 p
->p_flag
|= P_SYSTEM
;
711 * Take us out of the sibling chain, and
712 * out of our parent's child chain.
714 LIST_REMOVE(p
, p_sibling
);
715 p
->p_sibling
.le_prev
= NULL
;
716 p
->p_sibling
.le_next
= NULL
;
717 p
->p_pptr
= kernproc
;
719 return(KERN_SUCCESS
);
723 process_terminate_self(void)
725 struct proc
*p
= current_proc();
728 exit1(p
, W_EXITCODE(0, SIGKILL
), (int *)NULL
);
733 * Exit: deallocate address space and other resources, change proc state
734 * to zombie, and unlink proc from allproc and parent's lists. Save exit
735 * status and rusage for wait(). Check for child processes and orphan them.
740 register struct proc
*p
;
743 register struct proc
*q
, *nq
;
744 thread_t self
= current_thread();
745 thread_act_t th_act_self
= current_act();
746 struct task
*task
= p
->task
;
751 * If a thread in this task has already
752 * called exit(), then halt any others
756 ut
= get_bsdthread_info(th_act_self
);
759 while (p
->exit_thread
!= self
) {
760 if (sig_try_locked(p
) <= 0) {
761 if (get_threadtask(th_act_self
) != task
) {
766 thread_terminate(th_act_self
);
767 thread_funnel_set(kernel_flock
, FALSE
);
768 thread_exception_return();
775 printf("pid 1 exited (signal %d, exit %d)",
776 WTERMSIG(rv
), WEXITSTATUS(rv
));
777 panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data
);
782 p
->p_flag
|= P_WEXIT
;
785 * Remove proc from allproc queue and from pidhash chain.
786 * Need to do this before we do anything that can block.
787 * Not doing so causes things like mount() to find this on allproc
788 * in partially cleaned state.
790 LIST_REMOVE(p
, p_list
);
791 LIST_REMOVE(p
, p_hash
);
793 * If parent is waiting for us to exit or exec,
794 * P_PPWAIT is set; we will wakeup the parent below.
796 p
->p_flag
&= ~(P_TRACED
| P_PPWAIT
);
801 untimeout(realitexpire
, (caddr_t
)p
);
810 vproc_exit(struct proc
*p
)
812 register struct proc
*q
, *nq
;
813 thread_t self
= current_thread();
814 thread_act_t th_act_self
= current_act();
815 struct task
*task
= p
->task
;
818 boolean_t funnel_state
;
820 MALLOC_ZONE(p
->p_ru
, struct rusage
*,
821 sizeof (*p
->p_ru
), M_ZOMBIE
, M_WAITOK
);
824 * Close open files and release open-file table.
829 /* Close ref SYSV Shared memory*/
833 if (SESS_LEADER(p
)) {
834 register struct session
*sp
= p
->p_session
;
838 * Controlling process.
839 * Signal foreground pgrp,
840 * drain controlling terminal
841 * and revoke access to controlling terminal.
843 if (sp
->s_ttyp
->t_session
== sp
) {
844 if (sp
->s_ttyp
->t_pgrp
)
845 pgsignal(sp
->s_ttyp
->t_pgrp
, SIGHUP
, 1);
846 (void) ttywait(sp
->s_ttyp
);
848 * The tty could have been revoked
852 VOP_REVOKE(sp
->s_ttyvp
, REVOKEALL
);
858 * s_ttyp is not zero'd; we use this to indicate
859 * that the session once had a controlling terminal.
860 * (for logging and informational purposes)
866 fixjobc(p
, p
->p_pgrp
, 0);
867 p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
= RLIM_INFINITY
;
872 p
->p_traceflag
= 0; /* don't trace the vrele() */
878 q
= p
->p_children
.lh_first
;
879 if (q
) /* only need this if any child is S_ZOMB */
880 wakeup((caddr_t
) initproc
);
881 for (; q
!= 0; q
= nq
) {
882 nq
= q
->p_sibling
.le_next
;
883 proc_reparent(q
, initproc
);
885 * Traced processes are killed
886 * since their existence means someone is messing up.
888 if (q
->p_flag
& P_TRACED
) {
889 q
->p_flag
&= ~P_TRACED
;
890 if (q
->sigwait_thread
) {
891 thread_t sig_shuttle
= getshuttle_thread(q
->sigwait_thread
);
893 * The sigwait_thread could be stopped at a
894 * breakpoint. Wake it up to kill.
895 * Need to do this as it could be a thread which is not
896 * the first thread in the task. So any attempts to kill
897 * the process would result in a deadlock on q->sigwait.
899 thread_resume((struct thread
*)q
->sigwait_thread
);
900 clear_wait(sig_shuttle
, THREAD_INTERRUPTED
);
901 threadsignal(q
->sigwait_thread
, SIGKILL
, 0);
909 * Save exit status and final rusage info, adding in child rusage
910 * info and self times.
912 *p
->p_ru
= p
->p_stats
->p_ru
;
914 timerclear(&p
->p_ru
->ru_utime
);
915 timerclear(&p
->p_ru
->ru_stime
);
919 task_basic_info_data_t tinfo
;
920 task_thread_times_info_data_t ttimesinfo
;
921 int task_info_stuff
, task_ttimes_stuff
;
922 struct timeval ut
,st
;
924 task_info_stuff
= TASK_BASIC_INFO_COUNT
;
925 task_info(task
, TASK_BASIC_INFO
,
926 &tinfo
, &task_info_stuff
);
927 p
->p_ru
->ru_utime
.tv_sec
= tinfo
.user_time
.seconds
;
928 p
->p_ru
->ru_utime
.tv_usec
= tinfo
.user_time
.microseconds
;
929 p
->p_ru
->ru_stime
.tv_sec
= tinfo
.system_time
.seconds
;
930 p
->p_ru
->ru_stime
.tv_usec
= tinfo
.system_time
.microseconds
;
932 task_ttimes_stuff
= TASK_THREAD_TIMES_INFO_COUNT
;
933 task_info(task
, TASK_THREAD_TIMES_INFO
,
934 &ttimesinfo
, &task_ttimes_stuff
);
936 ut
.tv_sec
= ttimesinfo
.user_time
.seconds
;
937 ut
.tv_usec
= ttimesinfo
.user_time
.microseconds
;
938 st
.tv_sec
= ttimesinfo
.system_time
.seconds
;
939 st
.tv_usec
= ttimesinfo
.system_time
.microseconds
;
940 timeradd(&ut
,&p
->p_ru
->ru_utime
,&p
->p_ru
->ru_utime
);
941 timeradd(&st
,&p
->p_ru
->ru_stime
,&p
->p_ru
->ru_stime
);
945 ruadd(p
->p_ru
, &p
->p_stats
->p_cru
);
948 * Free up profiling buffers.
951 struct uprof
*p0
= &p
->p_stats
->p_prof
, *p1
, *pn
;
957 for (; p1
!= NULL
; p1
= pn
) {
959 kfree((vm_offset_t
)p1
, sizeof *p1
);
964 * Other substructures are freed from wait().
966 FREE_ZONE(p
->p_stats
, sizeof *p
->p_stats
, M_SUBPROC
);
969 FREE_ZONE(p
->p_sigacts
, sizeof *p
->p_sigacts
, M_SUBPROC
);
972 if (--p
->p_limit
->p_refcnt
== 0)
973 FREE_ZONE(p
->p_limit
, sizeof *p
->p_limit
, M_SUBPROC
);
977 * Finish up by terminating the task
978 * and halt this thread (only if a
979 * member of the task exiting).
984 * Notify parent that we're gone.
986 psignal(p
->p_pptr
, SIGCHLD
);
988 /* Place onto zombproc. */
989 LIST_INSERT_HEAD(&zombproc
, p
, p_list
);
992 /* and now wakeup the parent */
993 wakeup((caddr_t
)p
->p_pptr
);