 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/aio_kern.h>
#include <sys/kern_audit.h>

#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>

#include <sys/ktrace.h>
extern char	init_task_failure_data[];
int exit1 __P((struct proc *, int, int *));
void proc_prepareexit(struct proc *p);
void vfork_exit(struct proc *p, int rv);
void vproc_exit(struct proc *p);
	struct exit_args *uap;

	exit1(p, W_EXITCODE(uap->rval, 0), retval);

	/* drop funnel before we return */
	thread_funnel_set(kernel_flock, FALSE);
	thread_exception_return();

	thread_block(THREAD_CONTINUE_NULL);
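
/*
 * Note on the status encoding above: W_EXITCODE(ret, sig) from <sys/wait.h>
 * packs the wait status as ((ret) << 8 | (sig)), so W_EXITCODE(uap->rval, 0)
 * records a normal exit with the caller's return value in the high byte and
 * no terminating signal.  A user-space waiter would then see
 * WIFEXITED(status) true and recover the value with WEXITSTATUS(status).
 */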
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.

	register struct proc *p;

	register struct proc *q, *nq;
	thread_act_t self = current_act();
	struct task *task = p->task;
	 * If a thread in this task has already
	 * called exit(), then halt any others

	ut = get_bsdthread_info(self);
	if (ut->uu_flag & P_VFORK) {
		vfork_return(self, p->p_pptr, p, retval);
		unix_syscall_return(0);

	audit_syscall_exit(0, p, ut);	/* Exit is always successful */
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(self) != task) {
				thread_terminate(self);
				thread_funnel_set(kernel_flock, FALSE);
				thread_exception_return();

		printf("pid 1 exited (signal %d, exit %d)",
			WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s",
			init_task_failure_data);

	p->p_flag |= P_WEXIT;

	/* task terminate will call proc_terminate and that cleans it up */
	task_terminate_internal(task);
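
/*
 * Summary of the flow implied by the comment above (not additional
 * behavior): exit1() does not finish the BSD-side teardown itself.
 * task_terminate_internal() tears down the Mach task, and the termination
 * path is expected to call back into the proc cleanup in this file
 * (proc_prepareexit()/proc_exit() below), which turns the proc into a
 * zombie that wait() can reap.
 */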
proc_prepareexit(struct proc *p)

	exception_data_t code[EXCEPTION_CODE_MAX];
	thread_act_t self = current_act();

	code[0] = 0xFF000001;			/* Set terminate code */
	code[1] = p->p_pid;			/* Pass out the pid */
	(void)sys_perf_notify(p->task, &code, 2);	/* Notify the perf server */
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this proc on
	 * allproc in a partially cleaned state.

	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	LIST_REMOVE(p, p_hash);

	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.

	p->p_flag &= ~(P_TRACED | P_PPWAIT);

	ut = get_bsdthread_info(self);

	untimeout(realitexpire, (caddr_t)p->p_pid);
proc_exit(struct proc *p)

	register struct proc *q, *nq, *pp;
	struct task *task = p->task;
	boolean_t funnel_state;

	/* This can happen if thread_terminate of the single thread

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	if (!(p->p_flag & P_WEXIT)) {
		p->p_flag |= P_WEXIT;

	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
	 * need to cancel async IO requests that can be cancelled and wait for those
	 * already active.  MAY BLOCK!

	 * Close open files and release open-file table.

	/* Close ref SYSV Shared memory */
	/* Release SYSV semaphores */
	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.

			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);

				 * The tty could have been revoked

					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);

			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	(void)acct_process(p);
		p->p_traceflag = 0;	/* don't trace the vrele() */

			struct vnode *tvp = p->p_tracep;
	q = p->p_children.lh_first;
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = q->p_sibling.le_next;
		proc_reparent(q, initproc);

		 * Traced processes are killed
		 * since their existence means someone is messing up.

		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {

				 * The sigwait_thread could be stopped at a
				 * breakpoint. Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task. So any attempts to kill
				 * the process would result in a deadlock on q->sigwait.

				thread_resume((thread_act_t)q->sigwait_thread);
				clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
				threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.

	*p->p_ru = p->p_stats->p_ru;

	timerclear(&p->p_ru->ru_utime);
	timerclear(&p->p_ru->ru_stime);

		task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		int task_info_stuff, task_ttimes_stuff;
		struct timeval ut, st;

		task_info_stuff = TASK_BASIC_INFO_COUNT;
		task_info(task, TASK_BASIC_INFO,
			&tinfo, &task_info_stuff);
		p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
		p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
		p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
		p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

		task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
			&ttimesinfo, &task_ttimes_stuff);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
		timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);

	ruadd(p->p_ru, &p->p_stats->p_cru);
	 * Free up profiling buffers.

		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		for (; p1 != NULL; p1 = pn) {
			kfree((vm_offset_t)p1, sizeof *p1);

	 * Other substructures are freed from wait().

	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);

	/* Free the auditing info */
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).

	set_bsdtask_info(task, NULL);

	KNOTE(&p->p_klist, NOTE_EXIT);

	 * Notify parent that we're gone.

	if (p->p_pptr->p_flag & P_NOCLDWAIT) {
		struct proc * pp = p->p_pptr;

		 * Add child resource usage to parent before giving

		ruadd(&p->p_pptr->p_stats->p_cru, p->p_ru);

		proc_reparent(p, initproc);
		/* If there are no more children wakeup parent */
		if (LIST_EMPTY(&pp->p_children))

	/* should be fine as parent proc would be initproc */
	if (pp != initproc) {
		pp->si_pid = p->p_pid;
		pp->si_status = p->p_xstat;
		pp->si_code = CLD_EXITED;
		pp->si_uid = p->p_cred->p_ruid;

	psignal(pp, SIGCHLD);

	/* mark as a zombie */

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);

	(void) thread_funnel_set(kernel_flock, funnel_state);
	struct rusage *rusage;

owait(p, uap, retval)

	struct wait4_args *a;

	a = (struct wait4_args *)get_bsduthreadarg(current_act());

	return (wait1(p, a, retval, 1));

wait4(p, uap, retval)

	struct wait4_args *uap;

	return (wait1(p, uap, retval, 0));

	struct rusage *rusage;

owait3(p, uap, retval)

	struct owait3_args *uap;

	struct wait4_args *a;

	a = (struct wait4_args *)get_bsduthreadarg(current_act());

	a->rusage = uap->rusage;
	a->options = uap->options;
	a->status = uap->status;

	return (wait1(p, a, retval, 1));
wait1continue(result)

	thread = current_act();
	vt = (void *)get_bsduthreadarg(thread);
	retval = (int *)get_bsduthreadrval(thread);
	return(wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0));
wait1(q, uap, retval, compat)
	register struct proc *q;
	register struct wait4_args *uap;

	register struct proc *p, *t;

		uap->pid = -q->p_pgid;

	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid &&
		    p->p_pgid != -(uap->pid))

		if (p->p_flag & P_WAITING) {
			(void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);

		p->p_flag |= P_WAITING;   /* only allow single thread to wait() */
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;

				retval[1] = p->p_xstat;

				status = p->p_xstat;	/* convert to int */
				if (error = copyout((caddr_t)&status,
						(caddr_t)uap->status,

					p->p_flag &= ~P_WAITING;

			    (error = copyout((caddr_t)p->p_ru,
					(caddr_t)uap->rusage,
					sizeof (struct rusage)))) {
				p->p_flag &= ~P_WAITING;

			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.

			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				t->si_pid = p->p_pid;
				t->si_status = p->p_xstat;
				t->si_code = CLD_CONTINUED;
				t->si_uid = p->p_cred->p_ruid;

				p->p_flag &= ~P_WAITING;

			ruadd(&q->p_stats->p_cru, p->p_ru);
			FREE_ZONE(p->p_ru, sizeof *p->p_ru, M_ZOMBIE);

				printf("Warning : lost p_ru for %s\n", p->p_comm);

			 * Decrement the count of procs running with this uid.

			(void)chgproccnt(p->p_cred->p_ruid, -1);

			 * Free up credentials.

			if (--p->p_cred->p_refcnt == 0) {
				struct ucred *ucr = p->p_ucred;

				FREE_ZONE(pcr, sizeof *pcr, M_SUBPROC);

			 * Release reference to text vnode

			 * Finally finished with old proc entry.
			 * Unlink it from its process group and free it.

			LIST_REMOVE(p, p_list);	/* off zombproc */
			LIST_REMOVE(p, p_sibling);
			p->p_flag &= ~P_WAITING;
			FREE_ZONE(p, sizeof *p, M_PROC);
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			retval[0] = p->p_pid;

				retval[1] = W_STOPCODE(p->p_xstat);

			status = W_STOPCODE(p->p_xstat);
			error = copyout((caddr_t)&status,
				(caddr_t)uap->status,

			p->p_flag &= ~P_WAITING;

		p->p_flag &= ~P_WAITING;

	if (uap->options & WNOHANG) {

	if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue))
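
/*
 * Blocking here uses the continuation form of tsleep: when tsleep0() has to
 * wait it does not return through this stack frame.  Instead wait1continue()
 * (above) runs when the sleep ends, refetches the argument and return-value
 * pointers with get_bsduthreadarg()/get_bsduthreadrval(), and re-enters
 * wait1() to rescan for exited or stopped children.
 */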
 * make process 'parent' the new parent of process 'child'.

proc_reparent(child, parent)
	register struct proc *child;
	register struct proc *parent;

	if (child->p_pptr == parent)

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
 * Make the current process an "init" process, meaning
 * that it doesn't have a parent, and that it won't be
 * gunned down by kill(-1, 0).

	register struct proc *p = current_proc();

	if (suser(p->p_ucred, &p->p_acflag))
		return(KERN_NO_ACCESS);

	if (p->p_pid != 1 && p->p_pgid != p->p_pid)
		enterpgrp(p, p->p_pid, 0);
	p->p_flag |= P_SYSTEM;

	 * Take us out of the sibling chain, and
	 * out of our parent's child chain.

	LIST_REMOVE(p, p_sibling);
	p->p_sibling.le_prev = NULL;
	p->p_sibling.le_next = NULL;
	p->p_pptr = kernproc;

	return(KERN_SUCCESS);
process_terminate_self(void)

	struct proc *p = current_proc();

		exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
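
/*
 * Illustrative note: W_EXITCODE(0, SIGKILL) encodes "terminated by SIGKILL"
 * (the signal number goes in the low byte of the wait status), so a parent
 * waiting on this process would see WIFSIGNALED(status) true and
 * WTERMSIG(status) == SIGKILL rather than a normal exit status.
 */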
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.

	register struct proc *q, *nq;
	thread_act_t self = current_act();
	struct task *task = p->task;

	exception_data_t code[EXCEPTION_CODE_MAX];
	 * If a thread in this task has already
	 * called exit(), then halt any others

	ut = get_bsdthread_info(self);

	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(self) != task) {
				thread_terminate(self);
				thread_funnel_set(kernel_flock, FALSE);
				thread_exception_return();

		printf("pid 1 exited (signal %d, exit %d)",
			WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data);

	p->p_flag |= P_WEXIT;

	code[0] = 0xFF000001;			/* Set terminate code */
	code[1] = p->p_pid;			/* Pass out the pid */
	(void)sys_perf_notify(p->task, &code, 2);	/* Notify the perf server */
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this proc on
	 * allproc in a partially cleaned state.

	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	LIST_REMOVE(p, p_hash);

	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.

	p->p_flag &= ~(P_TRACED | P_PPWAIT);

	untimeout(realitexpire, (caddr_t)p->p_pid);
vproc_exit(struct proc *p)

	register struct proc *q, *nq, *pp;
	struct task *task = p->task;
	boolean_t funnel_state;

	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
	 * Close open files and release open-file table.

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.

			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);

				 * The tty could have been revoked

					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);

			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
		p->p_traceflag = 0;	/* don't trace the vrele() */

			struct vnode *tvp = p->p_tracep;
	q = p->p_children.lh_first;
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = q->p_sibling.le_next;
		proc_reparent(q, initproc);

		 * Traced processes are killed
		 * since their existence means someone is messing up.

		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {

				 * The sigwait_thread could be stopped at a
				 * breakpoint. Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task. So any attempts to kill
				 * the process would result in a deadlock on q->sigwait.

				thread_resume((thread_act_t)q->sigwait_thread);
				clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
				threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.

	*p->p_ru = p->p_stats->p_ru;

	timerclear(&p->p_ru->ru_utime);
	timerclear(&p->p_ru->ru_stime);

		task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		int task_info_stuff, task_ttimes_stuff;
		struct timeval ut, st;

		task_info_stuff = TASK_BASIC_INFO_COUNT;
		task_info(task, TASK_BASIC_INFO,
			&tinfo, &task_info_stuff);
		p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
		p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
		p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
		p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

		task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
			&ttimesinfo, &task_ttimes_stuff);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
		timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);

	ruadd(p->p_ru, &p->p_stats->p_cru);
	 * Free up profiling buffers.

		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		for (; p1 != NULL; p1 = pn) {
			kfree((vm_offset_t)p1, sizeof *p1);

	 * Other substructures are freed from wait().

	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
	p->p_sigacts = NULL;

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).

	p->task = TASK_NULL;

	 * Notify parent that we're gone.

	if (pp != initproc) {
		pp->si_pid = p->p_pid;
		pp->si_status = p->p_xstat;
		pp->si_code = CLD_EXITED;
		pp->si_uid = p->p_cred->p_ruid;

	psignal(p->p_pptr, SIGCHLD);

	/* mark as a zombie */

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);