2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
22 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
24 * Copyright (c) 1982, 1986, 1989, 1991, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
63 #include <machine/reg.h>
64 #include <machine/psl.h>
66 #include "compat_43.h"
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/ioctl.h>
74 #include <sys/resource.h>
75 #include <sys/kernel.h>
79 #include <sys/vnode.h>
80 #include <sys/syslog.h>
81 #include <sys/malloc.h>
82 #include <sys/resourcevar.h>
83 #include <sys/ptrace.h>
85 #include <sys/aio_kern.h>
87 #include <bsm/audit_kernel.h>
88 #include <bsm/audit_kevents.h>
90 #include <mach/mach_types.h>
91 #include <kern/thread.h>
92 #include <kern/thread_act.h>
93 #include <kern/sched_prim.h>
94 #include <kern/assert.h>
96 #include <sys/ktrace.h>
100 extern char init_task_failure_data
[];
101 int exit1
__P((struct proc
*, int, int *));
102 void proc_prepareexit(struct proc
*p
);
103 int vfork_exit(struct proc
*p
, int rv
);
104 void vproc_exit(struct proc
*p
);
116 struct exit_args
*uap
;
119 exit1(p
, W_EXITCODE(uap
->rval
, 0), retval
);
121 /* drop funnel before we return */
122 thread_funnel_set(kernel_flock
, FALSE
);
123 thread_exception_return();
126 thread_block(THREAD_CONTINUE_NULL
);
131 * Exit: deallocate address space and other resources, change proc state
132 * to zombie, and unlink proc from allproc and parent's lists. Save exit
133 * status and rusage for wait(). Check for child processes and orphan them.
137 register struct proc
*p
;
141 register struct proc
*q
, *nq
;
142 thread_act_t self
= current_act();
143 struct task
*task
= p
->task
;
148 * If a thread in this task has already
149 * called exit(), then halt any others
153 ut
= get_bsdthread_info(self
);
154 if (ut
->uu_flag
& P_VFORK
) {
155 if (!vfork_exit(p
, rv
)) {
156 vfork_return(self
, p
->p_pptr
, p
, retval
);
157 unix_syscall_return(0);
162 AUDIT_SYSCALL_EXIT(0, p
, ut
); /* Exit is always successfull */
164 while (p
->exit_thread
!= self
) {
165 if (sig_try_locked(p
) <= 0) {
166 if (get_threadtask(self
) != task
) {
171 thread_terminate(self
);
172 thread_funnel_set(kernel_flock
, FALSE
);
173 thread_exception_return();
180 printf("pid 1 exited (signal %d, exit %d)",
181 WTERMSIG(rv
), WEXITSTATUS(rv
));
182 panic("init died\nState at Last Exception:\n\n%s",
183 init_task_failure_data
);
187 p
->p_flag
|= P_WEXIT
;
192 /* task terminate will call proc_terminate and that cleans it up */
193 task_terminate_internal(task
);
199 proc_prepareexit(struct proc
*p
)
203 exception_data_t code
[EXCEPTION_CODE_MAX
];
204 thread_act_t self
= current_act();
206 code
[0] = 0xFF000001; /* Set terminate code */
207 code
[1] = p
->p_pid
; /* Pass out the pid */
208 (void)sys_perf_notify(p
->task
, &code
, 2); /* Notify the perf server */
211 * Remove proc from allproc queue and from pidhash chain.
212 * Need to do this before we do anything that can block.
213 * Not doing causes things like mount() find this on allproc
214 * in partially cleaned state.
216 LIST_REMOVE(p
, p_list
);
217 LIST_INSERT_HEAD(&zombproc
, p
, p_list
); /* Place onto zombproc. */
218 LIST_REMOVE(p
, p_hash
);
224 * If parent is waiting for us to exit or exec,
225 * P_PPWAIT is set; we will wakeup the parent below.
227 p
->p_flag
&= ~(P_TRACED
| P_PPWAIT
);
230 ut
= get_bsdthread_info(self
);
232 untimeout(realitexpire
, (caddr_t
)p
->p_pid
);
236 proc_exit(struct proc
*p
)
238 register struct proc
*q
, *nq
, *pp
;
239 struct task
*task
= p
->task
;
241 boolean_t funnel_state
;
243 /* This can happen if thread_terminate of the single thread
247 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
248 if( !(p
->p_flag
& P_WEXIT
)) {
250 p
->p_flag
|= P_WEXIT
;
255 MALLOC_ZONE(p
->p_ru
, struct rusage
*,
256 sizeof (*p
->p_ru
), M_ZOMBIE
, M_WAITOK
);
259 * need to cancel async IO requests that can be cancelled and wait for those
260 * already active. MAY BLOCK!
265 * Close open files and release open-file table.
270 /* Close ref SYSV Shared memory*/
273 /* Release SYSV semaphores */
276 if (SESS_LEADER(p
)) {
277 register struct session
*sp
= p
->p_session
;
283 * Controlling process.
284 * Signal foreground pgrp,
285 * drain controlling terminal
286 * and revoke access to controlling terminal.
288 if (sp
->s_ttyp
->t_session
== sp
) {
289 if (sp
->s_ttyp
->t_pgrp
)
290 pgsignal(sp
->s_ttyp
->t_pgrp
, SIGHUP
, 1);
291 (void) ttywait(sp
->s_ttyp
);
293 * The tty could have been revoked
297 VOP_REVOKE(sp
->s_ttyvp
, REVOKEALL
);
304 * s_ttyp is not zero'd; we use this to indicate
305 * that the session once had a controlling terminal.
306 * (for logging and informational purposes)
312 fixjobc(p
, p
->p_pgrp
, 0);
313 p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
= RLIM_INFINITY
;
314 (void)acct_process(p
);
320 p
->p_traceflag
= 0; /* don't trace the vrele() */
322 struct vnode
*tvp
= p
->p_tracep
;
325 if (UBCINFOEXISTS(tvp
))
331 q
= p
->p_children
.lh_first
;
332 if (q
) /* only need this if any child is S_ZOMB */
333 wakeup((caddr_t
) initproc
);
334 for (; q
!= 0; q
= nq
) {
335 nq
= q
->p_sibling
.le_next
;
336 proc_reparent(q
, initproc
);
338 * Traced processes are killed
339 * since their existence means someone is messing up.
341 if (q
->p_flag
& P_TRACED
) {
342 q
->p_flag
&= ~P_TRACED
;
343 if (q
->sigwait_thread
) {
345 * The sigwait_thread could be stopped at a
346 * breakpoint. Wake it up to kill.
347 * Need to do this as it could be a thread which is not
348 * the first thread in the task. So any attempts to kill
349 * the process would result into a deadlock on q->sigwait.
351 thread_resume((thread_act_t
)q
->sigwait_thread
);
352 clear_wait(q
->sigwait_thread
, THREAD_INTERRUPTED
);
353 threadsignal((thread_act_t
)q
->sigwait_thread
, SIGKILL
, 0);
360 * Save exit status and final rusage info, adding in child rusage
361 * info and self times.
363 *p
->p_ru
= p
->p_stats
->p_ru
;
365 timerclear(&p
->p_ru
->ru_utime
);
366 timerclear(&p
->p_ru
->ru_stime
);
369 task_basic_info_data_t tinfo
;
370 task_thread_times_info_data_t ttimesinfo
;
371 int task_info_stuff
, task_ttimes_stuff
;
372 struct timeval ut
,st
;
374 task_info_stuff
= TASK_BASIC_INFO_COUNT
;
375 task_info(task
, TASK_BASIC_INFO
,
376 &tinfo
, &task_info_stuff
);
377 p
->p_ru
->ru_utime
.tv_sec
= tinfo
.user_time
.seconds
;
378 p
->p_ru
->ru_utime
.tv_usec
= tinfo
.user_time
.microseconds
;
379 p
->p_ru
->ru_stime
.tv_sec
= tinfo
.system_time
.seconds
;
380 p
->p_ru
->ru_stime
.tv_usec
= tinfo
.system_time
.microseconds
;
382 task_ttimes_stuff
= TASK_THREAD_TIMES_INFO_COUNT
;
383 task_info(task
, TASK_THREAD_TIMES_INFO
,
384 &ttimesinfo
, &task_ttimes_stuff
);
386 ut
.tv_sec
= ttimesinfo
.user_time
.seconds
;
387 ut
.tv_usec
= ttimesinfo
.user_time
.microseconds
;
388 st
.tv_sec
= ttimesinfo
.system_time
.seconds
;
389 st
.tv_usec
= ttimesinfo
.system_time
.microseconds
;
390 timeradd(&ut
,&p
->p_ru
->ru_utime
,&p
->p_ru
->ru_utime
);
391 timeradd(&st
,&p
->p_ru
->ru_stime
,&p
->p_ru
->ru_stime
);
394 ruadd(p
->p_ru
, &p
->p_stats
->p_cru
);
397 * Free up profiling buffers.
400 struct uprof
*p0
= &p
->p_stats
->p_prof
, *p1
, *pn
;
406 for (; p1
!= NULL
; p1
= pn
) {
408 kfree((vm_offset_t
)p1
, sizeof *p1
);
413 * Other substructures are freed from wait().
415 FREE_ZONE(p
->p_stats
, sizeof *p
->p_stats
, M_SUBPROC
);
418 FREE_ZONE(p
->p_sigacts
, sizeof *p
->p_sigacts
, M_SUBPROC
);
421 if (--p
->p_limit
->p_refcnt
== 0)
422 FREE_ZONE(p
->p_limit
, sizeof *p
->p_limit
, M_SUBPROC
);
425 /* Free the auditing info */
429 * Finish up by terminating the task
430 * and halt this thread (only if a
431 * member of the task exiting).
435 set_bsdtask_info(task
, NULL
);
437 KNOTE(&p
->p_klist
, NOTE_EXIT
);
440 * Notify parent that we're gone.
442 if (p
->p_pptr
->p_flag
& P_NOCLDWAIT
) {
443 struct proc
* pp
= p
->p_pptr
;
446 * Add child resource usage to parent before giving
449 ruadd(&p
->p_pptr
->p_stats
->p_cru
, p
->p_ru
);
451 proc_reparent(p
, initproc
);
452 /* If there are no more children wakeup parent */
453 if (LIST_EMPTY(&pp
->p_children
))
456 /* should be fine as parent proc would be initproc */
458 if (pp
!= initproc
) {
459 pp
->si_pid
= p
->p_pid
;
460 pp
->si_status
= p
->p_xstat
;
461 pp
->si_code
= CLD_EXITED
;
462 pp
->si_uid
= p
->p_cred
->p_ruid
;
464 psignal(pp
, SIGCHLD
);
467 /* mark as a zombie */
470 /* and now wakeup the parent */
471 wakeup((caddr_t
)p
->p_pptr
);
473 (void) thread_funnel_set(kernel_flock
, funnel_state
);
481 struct rusage
*rusage
;
486 owait(p
, uap
, retval
)
491 struct wait4_args
*a
;
493 a
= (struct wait4_args
*)get_bsduthreadarg(current_act());
499 return (wait1(p
, a
, retval
, 1));
/*
 * wait4 -- wait4(2) system call entry.
 *
 * Thin wrapper: forwards the user arguments straight to the common
 * wait1() implementation in non-compat mode.
 */
int
wait4(p, uap, retval)
	struct proc *p;
	struct wait4_args *uap;
	int *retval;
{
	return (wait1(p, uap, retval, 0));
}
514 struct rusage
*rusage
;
518 owait3(p
, uap
, retval
)
520 struct owait3_args
*uap
;
523 struct wait4_args
*a
;
525 a
= (struct wait4_args
*)get_bsduthreadarg(current_act());
527 a
->rusage
= uap
->rusage
;
528 a
->options
= uap
->options
;
529 a
->status
= uap
->status
;
532 return (wait1(p
, a
, retval
, 1));
540 wait1continue(result
)
551 thread
= current_act();
552 vt
= (void *)get_bsduthreadarg(thread
);
553 retval
= (int *)get_bsduthreadrval(thread
);
554 return(wait1((struct proc
*)p
, (struct wait4_args
*)vt
, retval
, 0));
558 wait1(q
, uap
, retval
, compat
)
559 register struct proc
*q
;
560 register struct wait4_args
*uap
;
567 register struct proc
*p
, *t
;
573 uap
->pid
= -q
->p_pgid
;
577 for (p
= q
->p_children
.lh_first
; p
!= 0; p
= p
->p_sibling
.le_next
) {
578 if (uap
->pid
!= WAIT_ANY
&&
579 p
->p_pid
!= uap
->pid
&&
580 p
->p_pgid
!= -(uap
->pid
))
583 if (p
->p_flag
& P_WAITING
) {
584 (void)tsleep(&p
->p_stat
, PWAIT
, "waitcoll", 0);
587 p
->p_flag
|= P_WAITING
; /* only allow single thread to wait() */
589 if (p
->p_stat
== SZOMB
) {
590 retval
[0] = p
->p_pid
;
593 retval
[1] = p
->p_xstat
;
597 status
= p
->p_xstat
; /* convert to int */
598 if (error
= copyout((caddr_t
)&status
,
599 (caddr_t
)uap
->status
,
601 p
->p_flag
&= ~P_WAITING
;
607 (error
= copyout((caddr_t
)p
->p_ru
,
608 (caddr_t
)uap
->rusage
,
609 sizeof (struct rusage
)))) {
610 p
->p_flag
&= ~P_WAITING
;
615 * If we got the child via a ptrace 'attach',
616 * we need to give it back to the old parent.
618 if (p
->p_oppid
&& (t
= pfind(p
->p_oppid
))) {
622 t
->si_pid
= p
->p_pid
;
623 t
->si_status
= p
->p_xstat
;
624 t
->si_code
= CLD_CONTINUED
;
625 t
->si_uid
= p
->p_cred
->p_ruid
;
629 p
->p_flag
&= ~P_WAITING
;
635 ruadd(&q
->p_stats
->p_cru
, p
->p_ru
);
636 FREE_ZONE(p
->p_ru
, sizeof *p
->p_ru
, M_ZOMBIE
);
639 printf("Warning : lost p_ru for %s\n", p
->p_comm
);
643 * Decrement the count of procs running with this uid.
645 (void)chgproccnt(p
->p_cred
->p_ruid
, -1);
648 * Free up credentials.
650 if (--p
->p_cred
->p_refcnt
== 0) {
651 struct ucred
*ucr
= p
->p_ucred
;
660 FREE_ZONE(pcr
, sizeof *pcr
, M_SUBPROC
);
664 * Release reference to text vnode
672 * Finally finished with old proc entry.
673 * Unlink it from its process group and free it.
676 LIST_REMOVE(p
, p_list
); /* off zombproc */
677 LIST_REMOVE(p
, p_sibling
);
678 p
->p_flag
&= ~P_WAITING
;
679 FREE_ZONE(p
, sizeof *p
, M_PROC
);
684 if (p
->p_stat
== SSTOP
&& (p
->p_flag
& P_WAITED
) == 0 &&
685 (p
->p_flag
& P_TRACED
|| uap
->options
& WUNTRACED
)) {
686 p
->p_flag
|= P_WAITED
;
687 retval
[0] = p
->p_pid
;
690 retval
[1] = W_STOPCODE(p
->p_xstat
);
695 status
= W_STOPCODE(p
->p_xstat
);
696 error
= copyout((caddr_t
)&status
,
697 (caddr_t
)uap
->status
,
701 p
->p_flag
&= ~P_WAITING
;
705 p
->p_flag
&= ~P_WAITING
;
711 if (uap
->options
& WNOHANG
) {
716 if (error
= tsleep0((caddr_t
)q
, PWAIT
| PCATCH
, "wait", 0, wait1continue
))
723 * make process 'parent' the new parent of process 'child'.
726 proc_reparent(child
, parent
)
727 register struct proc
*child
;
728 register struct proc
*parent
;
731 if (child
->p_pptr
== parent
)
734 LIST_REMOVE(child
, p_sibling
);
735 LIST_INSERT_HEAD(&parent
->p_children
, child
, p_sibling
);
736 child
->p_pptr
= parent
;
740 * Make the current process an "init" process, meaning
741 * that it doesn't have a parent, and that it won't be
742 * gunned down by kill(-1, 0).
747 register struct proc
*p
= current_proc();
749 AUDIT_MACH_SYSCALL_ENTER(AUE_INITPROCESS
);
750 if (suser(p
->p_ucred
, &p
->p_acflag
)) {
751 AUDIT_MACH_SYSCALL_EXIT(KERN_NO_ACCESS
);
752 return(KERN_NO_ACCESS
);
755 if (p
->p_pid
!= 1 && p
->p_pgid
!= p
->p_pid
)
756 enterpgrp(p
, p
->p_pid
, 0);
757 p
->p_flag
|= P_SYSTEM
;
760 * Take us out of the sibling chain, and
761 * out of our parent's child chain.
763 LIST_REMOVE(p
, p_sibling
);
764 p
->p_sibling
.le_prev
= NULL
;
765 p
->p_sibling
.le_next
= NULL
;
766 p
->p_pptr
= kernproc
;
768 AUDIT_MACH_SYSCALL_EXIT(KERN_SUCCESS
);
769 return(KERN_SUCCESS
);
773 process_terminate_self(void)
775 struct proc
*p
= current_proc();
778 exit1(p
, W_EXITCODE(0, SIGKILL
), (int *)NULL
);
784 * Exit: deallocate address space and other resources, change proc state
785 * to zombie, and unlink proc from allproc and parent's lists. Save exit
786 * status and rusage for wait(). Check for child processes and orphan them.
794 register struct proc
*q
, *nq
;
795 thread_act_t self
= current_act();
796 struct task
*task
= p
->task
;
799 exception_data_t code
[EXCEPTION_CODE_MAX
];
801 ut
= get_bsdthread_info(self
);
802 if (p
->exit_thread
) {
805 p
->exit_thread
= self
;
808 p
->p_flag
|= P_WEXIT
;
811 code
[0] = 0xFF000001; /* Set terminate code */
812 code
[1] = p
->p_pid
; /* Pass out the pid */
813 (void)sys_perf_notify(p
->task
, &code
, 2); /* Notify the perf server */
816 * Remove proc from allproc queue and from pidhash chain.
817 * Need to do this before we do anything that can block.
818 * Not doing causes things like mount() find this on allproc
819 * in partially cleaned state.
821 LIST_REMOVE(p
, p_list
);
822 LIST_INSERT_HEAD(&zombproc
, p
, p_list
); /* Place onto zombproc. */
823 LIST_REMOVE(p
, p_hash
);
825 * If parent is waiting for us to exit or exec,
826 * P_PPWAIT is set; we will wakeup the parent below.
828 p
->p_flag
&= ~(P_TRACED
| P_PPWAIT
);
833 untimeout(realitexpire
, (caddr_t
)p
->p_pid
);
842 vproc_exit(struct proc
*p
)
844 register struct proc
*q
, *nq
, *pp
;
845 struct task
*task
= p
->task
;
847 boolean_t funnel_state
;
849 MALLOC_ZONE(p
->p_ru
, struct rusage
*,
850 sizeof (*p
->p_ru
), M_ZOMBIE
, M_WAITOK
);
853 * Close open files and release open-file table.
858 if (SESS_LEADER(p
)) {
859 register struct session
*sp
= p
->p_session
;
865 * Controlling process.
866 * Signal foreground pgrp,
867 * drain controlling terminal
868 * and revoke access to controlling terminal.
870 if (sp
->s_ttyp
->t_session
== sp
) {
871 if (sp
->s_ttyp
->t_pgrp
)
872 pgsignal(sp
->s_ttyp
->t_pgrp
, SIGHUP
, 1);
873 (void) ttywait(sp
->s_ttyp
);
875 * The tty could have been revoked
879 VOP_REVOKE(sp
->s_ttyvp
, REVOKEALL
);
886 * s_ttyp is not zero'd; we use this to indicate
887 * that the session once had a controlling terminal.
888 * (for logging and informational purposes)
894 fixjobc(p
, p
->p_pgrp
, 0);
895 p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
= RLIM_INFINITY
;
901 p
->p_traceflag
= 0; /* don't trace the vrele() */
903 struct vnode
*tvp
= p
->p_tracep
;
906 if (UBCINFOEXISTS(tvp
))
912 q
= p
->p_children
.lh_first
;
913 if (q
) /* only need this if any child is S_ZOMB */
914 wakeup((caddr_t
) initproc
);
915 for (; q
!= 0; q
= nq
) {
916 nq
= q
->p_sibling
.le_next
;
917 proc_reparent(q
, initproc
);
919 * Traced processes are killed
920 * since their existence means someone is messing up.
922 if (q
->p_flag
& P_TRACED
) {
923 q
->p_flag
&= ~P_TRACED
;
924 if (q
->sigwait_thread
) {
926 * The sigwait_thread could be stopped at a
927 * breakpoint. Wake it up to kill.
928 * Need to do this as it could be a thread which is not
929 * the first thread in the task. So any attempts to kill
930 * the process would result into a deadlock on q->sigwait.
932 thread_resume((thread_act_t
)q
->sigwait_thread
);
933 clear_wait(q
->sigwait_thread
, THREAD_INTERRUPTED
);
934 threadsignal((thread_act_t
)q
->sigwait_thread
, SIGKILL
, 0);
941 * Save exit status and final rusage info, adding in child rusage
942 * info and self times.
944 *p
->p_ru
= p
->p_stats
->p_ru
;
946 timerclear(&p
->p_ru
->ru_utime
);
947 timerclear(&p
->p_ru
->ru_stime
);
951 task_basic_info_data_t tinfo
;
952 task_thread_times_info_data_t ttimesinfo
;
953 int task_info_stuff
, task_ttimes_stuff
;
954 struct timeval ut
,st
;
956 task_info_stuff
= TASK_BASIC_INFO_COUNT
;
957 task_info(task
, TASK_BASIC_INFO
,
958 &tinfo
, &task_info_stuff
);
959 p
->p_ru
->ru_utime
.tv_sec
= tinfo
.user_time
.seconds
;
960 p
->p_ru
->ru_utime
.tv_usec
= tinfo
.user_time
.microseconds
;
961 p
->p_ru
->ru_stime
.tv_sec
= tinfo
.system_time
.seconds
;
962 p
->p_ru
->ru_stime
.tv_usec
= tinfo
.system_time
.microseconds
;
964 task_ttimes_stuff
= TASK_THREAD_TIMES_INFO_COUNT
;
965 task_info(task
, TASK_THREAD_TIMES_INFO
,
966 &ttimesinfo
, &task_ttimes_stuff
);
968 ut
.tv_sec
= ttimesinfo
.user_time
.seconds
;
969 ut
.tv_usec
= ttimesinfo
.user_time
.microseconds
;
970 st
.tv_sec
= ttimesinfo
.system_time
.seconds
;
971 st
.tv_usec
= ttimesinfo
.system_time
.microseconds
;
972 timeradd(&ut
,&p
->p_ru
->ru_utime
,&p
->p_ru
->ru_utime
);
973 timeradd(&st
,&p
->p_ru
->ru_stime
,&p
->p_ru
->ru_stime
);
977 ruadd(p
->p_ru
, &p
->p_stats
->p_cru
);
980 * Free up profiling buffers.
983 struct uprof
*p0
= &p
->p_stats
->p_prof
, *p1
, *pn
;
989 for (; p1
!= NULL
; p1
= pn
) {
991 kfree((vm_offset_t
)p1
, sizeof *p1
);
996 * Other substructures are freed from wait().
998 FREE_ZONE(p
->p_stats
, sizeof *p
->p_stats
, M_SUBPROC
);
1001 FREE_ZONE(p
->p_sigacts
, sizeof *p
->p_sigacts
, M_SUBPROC
);
1002 p
->p_sigacts
= NULL
;
1004 if (--p
->p_limit
->p_refcnt
== 0)
1005 FREE_ZONE(p
->p_limit
, sizeof *p
->p_limit
, M_SUBPROC
);
1009 * Finish up by terminating the task
1010 * and halt this thread (only if a
1011 * member of the task exiting).
1013 p
->task
= TASK_NULL
;
1016 * Notify parent that we're gone.
1019 if (pp
!= initproc
) {
1020 pp
->si_pid
= p
->p_pid
;
1021 pp
->si_status
= p
->p_xstat
;
1022 pp
->si_code
= CLD_EXITED
;
1023 pp
->si_uid
= p
->p_cred
->p_ruid
;
1025 psignal(p
->p_pptr
, SIGCHLD
);
1027 /* mark as a zombie */
1030 /* and now wakeup the parent */
1031 wakeup((caddr_t
)p
->p_pptr
);