/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
 */
#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>       /* fdfree */
#include <sys/shm_internal.h>   /* shmexit */
#include <sys/acct.h>           /* acct_process */
#include <machine/spl.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>

#include <sys/ktrace.h>

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/mach_traps.h>    /* init_process */
extern char init_task_failure_data[];

int exit1(struct proc *, int, int *);
void proc_prepareexit(struct proc *p);
void vfork_exit(struct proc *p, int rv);
void vproc_exit(struct proc *p);
__private_extern__ void munge_rusage(struct rusage *a_rusage_p,
    struct user_rusage *a_user_rusage_p);

/*
 * Things which should have prototypes in headers, but don't
 */
void    unix_syscall_return(int);
void    *get_bsduthreadarg(thread_t);
void    proc_exit(struct proc *p);
int     wait1continue(int result);
int     waitidcontinue(int result);
int     *get_bsduthreadrval(thread_t);
kern_return_t sys_perf_notify(struct task *task, exception_data_t code,
    mach_msg_type_number_t codeCnt);
/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX Should share code with bsd/dev/ppc/unix_signal.c
 */
static void
siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
{
    out->si_signo = in->si_signo;
    out->si_errno = in->si_errno;
    out->si_code = in->si_code;
    out->si_pid = in->si_pid;
    out->si_uid = in->si_uid;
    out->si_status = in->si_status;
    out->si_addr = CAST_DOWN(void *, in->si_addr);
    /* following cast works for sival_int because of padding */
    out->si_value.sival_ptr = CAST_DOWN(void *, in->si_value.sival_ptr);
    out->si_band = in->si_band;             /* range reduction */
    out->pad[0] = in->pad[0];               /* mcontext.ss.r1 */
}
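
/*
 * exit
 *
 * The exit() system call: encode the user-supplied status with W_EXITCODE()
 * and let exit1() do the actual work.
 */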
void
exit(struct proc *p, struct exit_args *uap, int *retval)
{
    exit1(p, W_EXITCODE(uap->rval, 0), retval);

    /* drop funnel before we return */
    thread_funnel_set(kernel_flock, FALSE);
    thread_exception_return();
    /* NOTREACHED */
    while (TRUE)
        thread_block(THREAD_CONTINUE_NULL);
}
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
int
exit1(struct proc *p, int rv, int *retval)
{
    thread_t self = current_thread();
    struct task *task = p->task;
    struct uthread *ut;

    /*
     * If a thread in this task has already
     * called exit(), then halt any others
     * right here.
     */
    ut = get_bsdthread_info(self);
    if (ut->uu_flag & UT_VFORK) {
        vfork_exit(p, rv);
        vfork_return(self, p->p_pptr, p, retval);
        unix_syscall_return(0);
        /* NOTREACHED */
    }
    AUDIT_SYSCALL_EXIT(0, p, ut);   /* Exit is always successful */

    while (p->exit_thread != self) {
        if (sig_try_locked(p) <= 0) {
            if (get_threadtask(self) != task) {
                return (0);
            }
            thread_terminate(self);
            thread_funnel_set(kernel_flock, FALSE);
            thread_exception_return();
            /* NOTREACHED */
        }
        sig_lock_to_exit(p);
    }

    if (p->p_pid == 1) {
        printf("pid 1 exited (signal %d, exit %d)",
            WTERMSIG(rv), WEXITSTATUS(rv));
        panic("init died\nState at Last Exception:\n\n%s",
            init_task_failure_data);
    }

    p->p_flag |= P_WEXIT;
    proc_prepareexit(p);
    p->p_xstat = rv;

    /* task terminate will call proc_terminate and that cleans it up */
    task_terminate_internal(task);

    return (0);
}
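
/*
 * proc_prepareexit
 *
 * First half of the BSD-side exit work: notify the perf server, move the
 * proc from the allproc list and pid hash onto the zombie list, and clear
 * tracing/signal state and the interval timer before the task is torn down.
 */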
void
proc_prepareexit(struct proc *p)
{
    struct uthread *ut;
    exception_data_t code[EXCEPTION_CODE_MAX];
    thread_t self = current_thread();

    code[0] = (exception_data_t)0xFF000001;     /* Set terminate code */
    code[1] = (exception_data_t)p->p_pid;       /* Pass out the pid */
    /* Notify the perf server */
    (void)sys_perf_notify(p->task, (exception_data_t)&code, 2);

    /*
     * Remove proc from allproc queue and from pidhash chain.
     * Need to do this before we do anything that can block.
     * Not doing so causes things like mount() to find this proc on allproc
     * in a partially cleaned state.
     */
    LIST_REMOVE(p, p_list);
    LIST_INSERT_HEAD(&zombproc, p, p_list);     /* Place onto zombproc. */
    LIST_REMOVE(p, p_hash);

    /*
     * If parent is waiting for us to exit or exec,
     * P_PPWAIT is set; we will wakeup the parent below.
     */
    p->p_flag &= ~(P_TRACED | P_PPWAIT);
    p->p_sigignore = ~(sigcantmask);
    ut = get_bsdthread_info(self);
    ut->uu_siglist = 0;
    untimeout(realitexpire, (caddr_t)p->p_pid);
}
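
/*
 * proc_exit
 *
 * BSD-side teardown for an exiting process: release open files, SysV IPC
 * and the controlling terminal, reparent children to init, fold the final
 * rusage into the zombie record, and notify the parent via SIGCHLD/wakeup
 * so wait() can reap us.
 */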
void
proc_exit(struct proc *p)
{
    register struct proc *q, *nq, *pp;
    struct task *task = p->task;
    boolean_t funnel_state;

    /*
     * This can happen if thread_terminate of the single thread
     * process gets here first.
     */
    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    if (!(p->p_flag & P_WEXIT)) {
        p->p_flag |= P_WEXIT;
        proc_prepareexit(p);
    }

    p->p_lflag |= P_LPEXIT;
    /* XXX Zombie allocation may fail, in which case stats get lost */
    MALLOC_ZONE(p->p_ru, struct rusage *,
        sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

    /*
     * need to cancel async IO requests that can be cancelled and wait for those
     * already active.  MAY BLOCK!
     */
    p->p_lflag |= P_LREFDRAIN;
    while (p->p_internalref) {
        p->p_lflag |= P_LREFDRAINWAIT;
        msleep(&p->p_internalref, (lck_mtx_t *)0, 0, "proc_refdrain", 0);
    }
    p->p_lflag &= ~P_LREFDRAIN;
    p->p_lflag |= P_LREFDEAD;

    /*
     * Close open files and release open-file table.
     */
    fdfree(p);

    /* Close ref SYSV Shared memory */
    if (p->vm_shm)
        shmexit(p);
    /* Release SYSV semaphores */
    semexit(p);

    if (SESS_LEADER(p)) {
        register struct session *sp = p->p_session;

        if (sp->s_ttyvp) {
            struct vfs_context context;

            /*
             * Controlling process.
             * Signal foreground pgrp,
             * drain controlling terminal
             * and revoke access to controlling terminal.
             */
            if (sp->s_ttyp->t_session == sp) {
                if (sp->s_ttyp->t_pgrp)
                    pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
                (void) ttywait(sp->s_ttyp);
                /*
                 * The tty could have been revoked
                 * if we blocked.
                 */
                context.vc_proc = p;
                context.vc_ucred = kauth_cred_proc_ref(p);
                if (sp->s_ttyvp)
                    VNOP_REVOKE(sp->s_ttyvp, REVOKEALL, &context);
                kauth_cred_unref(&context.vc_ucred);
            }
            /*
             * s_ttyp is not zero'd; we use this to indicate
             * that the session once had a controlling terminal.
             * (for logging and informational purposes)
             */
        }
    }

    fixjobc(p, p->p_pgrp, 0);
    p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
    (void)acct_process(p);
    p->p_traceflag = 0;         /* don't trace the vnode_put() */
    if (p->p_tracep) {
        struct vnode *tvp = p->p_tracep;

        p->p_tracep = NULL;
        vnode_put(tvp);
    }
    while ((q = p->p_children.lh_first) != NULL) {
        proc_reparent(q, initproc);
        /*
         * Traced processes are killed
         * since their existence means someone is messing up.
         */
        if (q->p_flag & P_TRACED) {
            q->p_flag &= ~P_TRACED;
            if (q->sigwait_thread) {
                /*
                 * The sigwait_thread could be stopped at a
                 * breakpoint. Wake it up to kill.
                 * Need to do this as it could be a thread which is not
                 * the first thread in the task. So any attempts to kill
                 * the process would result in a deadlock on q->sigwait.
                 */
                thread_resume((thread_t)q->sigwait_thread);
                clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
                threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
            }
            psignal(q, SIGKILL);
        }
    }
    /*
     * Save exit status and final rusage info, adding in child rusage
     * info and self times.  If we were unable to allocate a zombie
     * structure, this information is lost.
     */
    if (p->p_ru != NULL) {
        *p->p_ru = p->p_stats->p_ru;

        timerclear(&p->p_ru->ru_utime);
        timerclear(&p->p_ru->ru_stime);

        if (task) {
            task_basic_info_data_t tinfo;
            task_thread_times_info_data_t ttimesinfo;
            int task_info_stuff, task_ttimes_stuff;
            struct timeval ut, st;

            task_info_stuff = TASK_BASIC_INFO_COUNT;
            task_info(task, TASK_BASIC_INFO,
                (task_info_t)&tinfo, &task_info_stuff);
            p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
            p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
            p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
            p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

            task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
            task_info(task, TASK_THREAD_TIMES_INFO,
                (task_info_t)&ttimesinfo, &task_ttimes_stuff);

            ut.tv_sec = ttimesinfo.user_time.seconds;
            ut.tv_usec = ttimesinfo.user_time.microseconds;
            st.tv_sec = ttimesinfo.system_time.seconds;
            st.tv_usec = ttimesinfo.system_time.microseconds;
            timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
            timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
        }

        ruadd(p->p_ru, &p->p_stats->p_cru);
    }
    /*
     * Free up profiling buffers.
     */
    {
        struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

        p1 = p0->pr_next;
        p0->pr_next = NULL;

        for (; p1 != NULL; p1 = pn) {
            pn = p1->pr_next;
            kfree(p1, sizeof *p1);
        }
    }

    /*
     * Other substructures are freed from wait().
     */
    FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);

    FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);

    if (--p->p_limit->p_refcnt == 0)
        FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
    /*
     * Finish up by terminating the task
     * and halt this thread (only if a
     * member of the task exiting).
     */
    p->task = TASK_NULL;
    set_bsdtask_info(task, NULL);

    KNOTE(&p->p_klist, NOTE_EXIT);

    /*
     * Notify parent that we're gone.
     */
    if (p->p_pptr->p_flag & P_NOCLDWAIT) {
        struct proc *opp = p->p_pptr;

        /*
         * Add child resource usage to parent before giving
         * zombie to init.  If we were unable to allocate a
         * zombie structure, this information is lost.
         */
        if (p->p_ru != NULL)
            ruadd(&p->p_pptr->p_stats->p_cru, p->p_ru);

        proc_reparent(p, initproc);
        /* If there are no more children wakeup parent */
        if (LIST_EMPTY(&opp->p_children))
            wakeup((caddr_t)opp);
    }
    /* should be fine as parent proc would be initproc */
    pp = p->p_pptr;
    if (pp != initproc) {
        pp->si_pid = p->p_pid;
        pp->si_status = p->p_xstat;
        pp->si_code = CLD_EXITED;
        pp->si_uid = p->p_ucred->cr_ruid;
    }
    /* mark as a zombie */
    p->p_stat = SZOMB;

    psignal(pp, SIGCHLD);

    /* and now wakeup the parent */
    wakeup((caddr_t)p->p_pptr);

    (void) thread_funnel_set(kernel_flock, funnel_state);
}
/*
 * reap_child_process
 *
 * Description: Given a process from which all status information needed
 *              has already been extracted, if the process is a ptrace
 *              attach process, detach it and give it back to its real
 *              parent, else recover all resources remaining associated
 *              with it.
 *
 * Parameters:  struct proc *parent     Parent of process being reaped
 *              struct proc *child      Process to reap
 *
 * Returns:     0                       Process was not reaped because it
 *                                      came from an attach
 *              1                       Process was reaped
 */
static int
reap_child_process(struct proc *parent, struct proc *child)
{
    struct proc *trace_parent;  /* Traced parent process, if tracing */
    struct vnode *tvp;          /* Traced vnode pointer, if used */

    /*
     * If we got the child via a ptrace 'attach',
     * we need to give it back to the old parent.
     */
    if (child->p_oppid && (trace_parent = pfind(child->p_oppid))) {
        child->p_oppid = 0;
        proc_reparent(child, trace_parent);
        if (trace_parent != initproc) {
            trace_parent->si_pid = child->p_pid;
            trace_parent->si_status = child->p_xstat;
            trace_parent->si_code = CLD_CONTINUED;
            trace_parent->si_uid = child->p_ucred->cr_ruid;
        }
        psignal(trace_parent, SIGCHLD);
        wakeup((caddr_t)trace_parent);
        return (0);
    }

    if (child->p_ru) {
        ruadd(&parent->p_stats->p_cru, child->p_ru);
        FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
        child->p_ru = NULL;
    } else {
        printf("Warning : lost p_ru for %s\n", child->p_comm);
    }

    /*
     * Decrement the count of procs running with this uid.
     */
    (void)chgproccnt(child->p_ucred->cr_ruid, -1);

    /*
     * Free up credentials.
     */
    if (IS_VALID_CRED(child->p_ucred)) {
        kauth_cred_unref(&child->p_ucred);
    }

    /*
     * Release reference to text vnode
     */
    tvp = child->p_textvp;
    child->p_textvp = NULL;
    if (tvp)
        vnode_rele(tvp);

    /*
     * Finally finished with old proc entry.
     * Unlink it from its process group and free it.
     */
    leavepgrp(child);
    LIST_REMOVE(child, p_list);         /* off zombproc */
    LIST_REMOVE(child, p_sibling);
    child->p_lflag &= ~P_LWAITING;
    wakeup(&child->p_stat);

    lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
    lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
    FREE_ZONE(child, sizeof *child, M_PROC);
    return (1);
}
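
/*
 * wait1continue
 *
 * Continuation routine passed to tsleep0() by wait4(): when the sleep is
 * interrupted or a child changes state, the original syscall arguments and
 * return-value pointer are recovered from the current uthread and wait4()
 * is re-entered.
 */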
int
wait1continue(int result)
{
    void *vt;
    thread_t thread;
    int *retval;
    struct proc *p;

    if (result)
        return (result);

    p = current_proc();
    thread = current_thread();
    vt = get_bsduthreadarg(thread);
    retval = get_bsduthreadrval(thread);
    return (wait4((struct proc *)p, (struct wait4_args *)vt, retval));
}
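
/*
 * wait4
 *
 * The wait4() system call: scan our children for one matching uap->pid that
 * has exited or stopped, copy its status (and optionally rusage) out to user
 * space and reap it; otherwise block with wait1continue as the continuation,
 * unless WNOHANG was requested.
 */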
int
wait4(struct proc *q, struct wait4_args *uap, register_t *retval)
{
    register int nfound;
    register struct proc *p;
    int status, error;

    if (uap->pid == 0)
        uap->pid = -q->p_pgid;

loop:
    nfound = 0;
    for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
        if (uap->pid != WAIT_ANY &&
            p->p_pid != uap->pid &&
            p->p_pgid != -(uap->pid))
            continue;
        nfound++;

        /* XXX This is racy because we don't get the lock!!!! */
        if (p->p_lflag & P_LWAITING) {
            (void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
            goto loop;
        }
        p->p_lflag |= P_LWAITING;       /* only allow single thread to wait() */

        if (p->p_stat == SZOMB) {
            retval[0] = p->p_pid;
            if (uap->status) {
                status = p->p_xstat;    /* convert to int */
                error = copyout((caddr_t)&status,
                    uap->status, sizeof(status));
                if (error) {
                    p->p_lflag &= ~P_LWAITING;
                    wakeup(&p->p_stat);
                    return (error);
                }
            }
            if (uap->rusage) {
                if (p->p_ru == NULL) {
                    error = ENOMEM;
                } else {
                    if (IS_64BIT_PROCESS(q)) {
                        struct user_rusage my_rusage;

                        munge_rusage(p->p_ru, &my_rusage);
                        error = copyout((caddr_t)&my_rusage,
                            uap->rusage, sizeof (my_rusage));
                    } else {
                        error = copyout((caddr_t)p->p_ru,
                            uap->rusage, sizeof (struct rusage));
                    }
                }
                /* information unavailable? */
                if (error) {
                    p->p_lflag &= ~P_LWAITING;
                    wakeup(&p->p_stat);
                    return (error);
                }
            }

            /* Clean up */
            if (!reap_child_process(q, p)) {
                p->p_lflag &= ~P_LWAITING;
                wakeup(&p->p_stat);
            }

            return (0);
        }
        if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
            (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
            p->p_flag |= P_WAITED;
            retval[0] = p->p_pid;
            if (uap->status) {
                status = W_STOPCODE(p->p_xstat);
                error = copyout((caddr_t)&status,
                    uap->status, sizeof(status));
            } else {
                error = 0;
            }
            p->p_lflag &= ~P_LWAITING;
            wakeup(&p->p_stat);
            return (error);
        }
        p->p_lflag &= ~P_LWAITING;
        wakeup(&p->p_stat);
    }
    if (nfound == 0)
        return (ECHILD);

    if (uap->options & WNOHANG) {
        retval[0] = 0;
        return (0);
    }

    if ((error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue)))
        return (error);

    goto loop;
}
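
/*
 * waitidcontinue
 *
 * Continuation routine passed to tsleep0() by waitid(); mirrors
 * wait1continue() but restarts waitid() instead.
 */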
int
waitidcontinue(int result)
{
    void *vt;
    thread_t thread;
    int *retval;
    struct proc *p;

    if (result)
        return (result);

    p = current_proc();
    thread = current_thread();
    vt = get_bsduthreadarg(thread);
    retval = get_bsduthreadrval(thread);
    return (waitid((struct proc *)p, (struct waitid_args *)vt, retval));
}
/*
 * waitid
 *
 * Description: Suspend the calling thread until one child of the process
 *              containing the calling thread changes state.
 *
 * Parameters:  uap->idtype             one of P_PID, P_PGID, P_ALL
 *              uap->id                 pid_t or gid_t or ignored
 *              uap->infop              Address of siginfo_t struct in
 *                                      user space into which to return status
 *              uap->options            flag values
 *
 * Returns:     0                       Success
 *              !0                      Error returning status to user space
 */
int
waitid(struct proc *q, struct waitid_args *uap, register_t *retval)
{
    user_siginfo_t collect64;   /* siginfo data to return to caller */
    register int nfound;
    register struct proc *p;
    int error;

loop:
    nfound = 0;
    for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
        switch (uap->idtype) {
        case P_PID:     /* child with process ID equal to... */
            if (p->p_pid != (pid_t)uap->id)
                continue;
            break;
        case P_PGID:    /* child with process group ID equal to... */
            if (p->p_pgid != (pid_t)uap->id)
                continue;
            break;
        case P_ALL:     /* any child */
            break;
        }

        /* XXX This is racy because we don't get the lock!!!! */

        /*
         * Wait collision; go to sleep and restart; used to maintain
         * the single return for waited process guarantee.
         */
        if (p->p_lflag & P_LWAITING) {
            (void)tsleep(&p->p_stat, PWAIT, "waitidcoll", 0);
            goto loop;
        }
        p->p_lflag |= P_LWAITING;       /* mark busy */

        nfound++;

        /*
         * Types of processes we are interested in
         *
         * XXX Don't know what to do for WCONTINUED?!?
         */
        switch (p->p_stat) {
        case SZOMB:             /* Exited */
            if (!(uap->options & WEXITED))
                break;

            /* Collect "siginfo" information for caller */
            collect64.si_signo = 0;
            collect64.si_code = 0;
            collect64.si_errno = 0;
            collect64.si_pid = 0;
            collect64.si_uid = 0;
            collect64.si_addr = 0;
            collect64.si_status = p->p_xstat;
            collect64.si_band = 0;

            if (IS_64BIT_PROCESS(p)) {
                error = copyout((caddr_t)&collect64,
                    uap->infop, sizeof(collect64));
            } else {
                siginfo_t collect;

                siginfo_64to32(&collect64, &collect);
                error = copyout((caddr_t)&collect,
                    uap->infop, sizeof(collect));
            }
            /* information unavailable? */
            if (error) {
                p->p_lflag &= ~P_LWAITING;
                wakeup(&p->p_stat);
                return (error);
            }

            /* Prevent other processes from waiting for this event? */
            if (!(uap->options & WNOWAIT)) {
                /* Clean up */
                if (!reap_child_process(q, p)) {
                    p->p_lflag &= ~P_LWAITING;
                    wakeup(&p->p_stat);
                }
            }
            return (0);

        case SSTOP:             /* Stopped */
            /*
             * If we are not interested in stopped processes, then
             * ignore this one.
             */
            if (!(uap->options & WSTOPPED))
                break;

            /*
             * If someone has already waited on it, we lost a race
             * to be the one to return status.
             */
            if ((p->p_flag & P_WAITED) != 0)
                break;

            /*
             * If this is not a traced process, and they haven't
             * indicated an interest in untraced processes, then
             * ignore this one.
             */
            if (!(p->p_flag & P_TRACED) && !(uap->options & WUNTRACED))
                break;

            /* Collect "siginfo" information for caller */
            collect64.si_signo = 0;
            collect64.si_code = 0;
            collect64.si_errno = 0;
            collect64.si_pid = 0;
            collect64.si_uid = 0;
            collect64.si_addr = 0;
            collect64.si_status = p->p_xstat;
            collect64.si_band = 0;

            if (IS_64BIT_PROCESS(p)) {
                error = copyout((caddr_t)&collect64,
                    uap->infop, sizeof(collect64));
            } else {
                siginfo_t collect;

                siginfo_64to32(&collect64, &collect);
                error = copyout((caddr_t)&collect,
                    uap->infop, sizeof(collect));
            }
            /* information unavailable? */
            if (error) {
                p->p_lflag &= ~P_LWAITING;
                wakeup(&p->p_stat);
                return (error);
            }

            /* Prevent other processes from waiting for this event? */
            if (!(uap->options & WNOWAIT)) {
                p->p_flag |= P_WAITED;
            }

            p->p_lflag &= ~P_LWAITING;
            wakeup(&p->p_stat);
            return (0);

        default:                /* All others */
                                /* ...meaning Continued */
            if (!(uap->options & WCONTINUED))
                break;

            /*
             * If the flag isn't set, then this process has not
             * been stopped and continued, or the status has
             * already been reaped by another caller of waitid().
             */
            if ((p->p_flag & P_CONTINUED) == 0)
                break;

            /* Collect "siginfo" information for caller */
            collect64.si_signo = 0;
            collect64.si_code = 0;
            collect64.si_errno = 0;
            collect64.si_pid = 0;
            collect64.si_uid = 0;
            collect64.si_addr = 0;
            collect64.si_status = p->p_xstat;
            collect64.si_band = 0;

            if (IS_64BIT_PROCESS(p)) {
                error = copyout((caddr_t)&collect64,
                    uap->infop, sizeof(collect64));
            } else {
                siginfo_t collect;

                siginfo_64to32(&collect64, &collect);
                error = copyout((caddr_t)&collect,
                    uap->infop, sizeof(collect));
            }
            /* information unavailable? */
            if (error) {
                p->p_lflag &= ~P_LWAITING;
                wakeup(&p->p_stat);
                return (error);
            }

            /* Prevent other processes from waiting for this event? */
            if (!(uap->options & WNOWAIT)) {
                p->p_flag &= ~P_CONTINUED;
            }

            p->p_lflag &= ~P_LWAITING;
            wakeup(&p->p_stat);
            return (0);
        }

        /* Not a process we are interested in; go on to next child */
        p->p_lflag &= ~P_LWAITING;
        wakeup(&p->p_stat);
    }

    /* No child processes that could possibly satisfy the request? */
    if (nfound == 0)
        return (ECHILD);

    if (uap->options & WNOHANG) {
        retval[0] = 0;
        return (0);
    }

    if ((error = tsleep0((caddr_t)q, PWAIT | PCATCH, "waitid", 0, waitidcontinue)))
        return (error);

    goto loop;
}
/*
 * Make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
    if (child->p_pptr == parent)
        return;

    LIST_REMOVE(child, p_sibling);
    LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
    child->p_pptr = parent;

    if (initproc == parent && child->p_stat == SZOMB)
        psignal(initproc, SIGCHLD);
}
/*
 * Make the current process an "init" process, meaning
 * that it doesn't have a parent, and that it won't be
 * gunned down by kill(-1, 0).
 */
kern_return_t
init_process(__unused struct init_process_args *args)
{
    register struct proc *p = current_proc();

    AUDIT_MACH_SYSCALL_ENTER(AUE_INITPROCESS);
    if (suser(kauth_cred_get(), &p->p_acflag)) {
        AUDIT_MACH_SYSCALL_EXIT(KERN_NO_ACCESS);
        return (KERN_NO_ACCESS);
    }

    if (p->p_pid != 1 && p->p_pgid != p->p_pid)
        enterpgrp(p, p->p_pid, 0);
    p->p_flag |= P_SYSTEM;

    /*
     * Take us out of the sibling chain, and
     * out of our parent's child chain.
     */
    LIST_REMOVE(p, p_sibling);
    p->p_sibling.le_prev = NULL;
    p->p_sibling.le_next = NULL;
    p->p_pptr = kernproc;

    AUDIT_MACH_SYSCALL_EXIT(KERN_SUCCESS);
    return (KERN_SUCCESS);
}
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
vfork_exit(struct proc *p, int rv)
{
    thread_t self = current_thread();
    struct task *task = p->task;
    struct uthread *ut;
    exception_data_t code[EXCEPTION_CODE_MAX];

    /*
     * If a thread in this task has already
     * called exit(), then halt any others
     * right here.
     */
    ut = get_bsdthread_info(self);

    while (p->exit_thread != self) {
        if (sig_try_locked(p) <= 0) {
            if (get_threadtask(self) != task) {
                return;
            }
            thread_terminate(self);
            thread_funnel_set(kernel_flock, FALSE);
            thread_exception_return();
            /* NOTREACHED */
        }
        sig_lock_to_exit(p);
    }

    if (p->p_pid == 1) {
        printf("pid 1 exited (signal %d, exit %d)",
            WTERMSIG(rv), WEXITSTATUS(rv));
        panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data);
    }

    p->p_flag |= P_WEXIT;
    p->p_lflag |= P_LPEXIT;

    code[0] = (exception_data_t)0xFF000001;     /* Set terminate code */
    code[1] = (exception_data_t)p->p_pid;       /* Pass out the pid */
    /* Notify the perf server */
    (void)sys_perf_notify(p->task, (exception_data_t)&code, 2);

    /*
     * Remove proc from allproc queue and from pidhash chain.
     * Need to do this before we do anything that can block.
     * Not doing so causes things like mount() to find this proc on allproc
     * in a partially cleaned state.
     */
    LIST_REMOVE(p, p_list);
    LIST_INSERT_HEAD(&zombproc, p, p_list);     /* Place onto zombproc. */
    LIST_REMOVE(p, p_hash);

    /*
     * If parent is waiting for us to exit or exec,
     * P_PPWAIT is set; we will wakeup the parent below.
     */
    p->p_flag &= ~(P_TRACED | P_PPWAIT);
    p->p_sigignore = ~0;
    ut->uu_siglist = 0;
    untimeout(realitexpire, (caddr_t)p->p_pid);

    p->p_xstat = rv;

    vproc_exit(p);
}
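
/*
 * vproc_exit
 *
 * Second half of vfork_exit(): releases the BSD-side resources and notifies
 * the parent in the same way proc_exit() does, but without going through
 * Mach task termination.
 */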
void
vproc_exit(struct proc *p)
{
    register struct proc *q, *nq, *pp;
    struct task *task = p->task;

    /* XXX Zombie allocation may fail, in which case stats get lost */
    MALLOC_ZONE(p->p_ru, struct rusage *,
        sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

    /*
     * Close open files and release open-file table.
     */
    fdfree(p);

    if (SESS_LEADER(p)) {
        register struct session *sp = p->p_session;

        if (sp->s_ttyvp) {
            struct vnode *ttyvp;
            struct vfs_context context;

            /*
             * Controlling process.
             * Signal foreground pgrp,
             * drain controlling terminal
             * and revoke access to controlling terminal.
             */
            if (sp->s_ttyp->t_session == sp) {
                if (sp->s_ttyp->t_pgrp)
                    pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
                (void) ttywait(sp->s_ttyp);
                /*
                 * The tty could have been revoked
                 * if we blocked.
                 */
                context.vc_proc = p;
                context.vc_ucred = kauth_cred_proc_ref(p);
                if (sp->s_ttyvp)
                    VNOP_REVOKE(sp->s_ttyvp, REVOKEALL, &context);
                kauth_cred_unref(&context.vc_ucred);
            }
            ttyvp = sp->s_ttyvp;
            sp->s_ttyvp = NULL;
            if (ttyvp)
                vnode_rele(ttyvp);
            /*
             * s_ttyp is not zero'd; we use this to indicate
             * that the session once had a controlling terminal.
             * (for logging and informational purposes)
             */
        }
        sp->s_leader = NULL;
    }

    fixjobc(p, p->p_pgrp, 0);
    p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

    /*
     * release trace file
     */
    p->p_traceflag = 0;         /* don't trace the vnode_rele() */
    if (p->p_tracep) {
        struct vnode *tvp = p->p_tracep;

        p->p_tracep = NULL;
        vnode_rele(tvp);
    }

    while ((q = p->p_children.lh_first) != NULL) {
        proc_reparent(q, initproc);
        /*
         * Traced processes are killed
         * since their existence means someone is messing up.
         */
        if (q->p_flag & P_TRACED) {
            q->p_flag &= ~P_TRACED;
            if (q->sigwait_thread) {
                /*
                 * The sigwait_thread could be stopped at a
                 * breakpoint. Wake it up to kill.
                 * Need to do this as it could be a thread which is not
                 * the first thread in the task. So any attempts to kill
                 * the process would result in a deadlock on q->sigwait.
                 */
                thread_resume((thread_t)q->sigwait_thread);
                clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
                threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
            }
            psignal(q, SIGKILL);
        }
    }

    /*
     * Save exit status and final rusage info, adding in child rusage
     * info and self times.  If we were unable to allocate a zombie
     * structure, this information is lost.
     */
    if (p->p_ru != NULL) {
        *p->p_ru = p->p_stats->p_ru;

        timerclear(&p->p_ru->ru_utime);
        timerclear(&p->p_ru->ru_stime);

        if (task) {
            task_basic_info_data_t tinfo;
            task_thread_times_info_data_t ttimesinfo;
            int task_info_stuff, task_ttimes_stuff;
            struct timeval ut, st;

            task_info_stuff = TASK_BASIC_INFO_COUNT;
            task_info(task, TASK_BASIC_INFO,
                (task_info_t)&tinfo, &task_info_stuff);
            p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
            p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
            p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
            p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

            task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
            task_info(task, TASK_THREAD_TIMES_INFO,
                (task_info_t)&ttimesinfo, &task_ttimes_stuff);

            ut.tv_sec = ttimesinfo.user_time.seconds;
            ut.tv_usec = ttimesinfo.user_time.microseconds;
            st.tv_sec = ttimesinfo.system_time.seconds;
            st.tv_usec = ttimesinfo.system_time.microseconds;
            timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
            timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
        }

        ruadd(p->p_ru, &p->p_stats->p_cru);
    }

    /*
     * Free up profiling buffers.
     */
    {
        struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

        p1 = p0->pr_next;
        p0->pr_next = NULL;

        for (; p1 != NULL; p1 = pn) {
            pn = p1->pr_next;
            kfree(p1, sizeof *p1);
        }
    }

    /*
     * Other substructures are freed from wait().
     */
    FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
    p->p_stats = NULL;

    FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
    p->p_sigacts = NULL;

    if (--p->p_limit->p_refcnt == 0)
        FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);

    /*
     * Finish up by terminating the task
     * and halt this thread (only if a
     * member of the task exiting).
     */
    p->task = TASK_NULL;

    /*
     * Notify parent that we're gone.
     */
    pp = p->p_pptr;
    if (pp != initproc) {
        pp->si_pid = p->p_pid;
        pp->si_status = p->p_xstat;
        pp->si_code = CLD_EXITED;
        pp->si_uid = p->p_ucred->cr_ruid;
    }
    /* mark as a zombie */
    p->p_stat = SZOMB;

    psignal(p->p_pptr, SIGCHLD);

    /* and now wakeup the parent */
    wakeup((caddr_t)p->p_pptr);
}
/*
 * LP64 support - long is 64 bits if we are dealing with a 64 bit user
 * process. We munge the kernel (32 bit) version of rusage into the
 * 64 bit version.
 */
__private_extern__ void
munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p)
{
    /* timeval changes size, so utime and stime need special handling */
    a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
    a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
    a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
    a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
    /*
     * everything else can be a direct assign, since there is no loss
     * of precision implied going 32->64.
     */
    a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
    a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
    a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
    a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
    a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
    a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
    a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
    a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
    a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
    a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
    a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
    a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
    a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
    a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}