2  * Copyright (c) 2000-2011 Apple Inc. All rights reserved. 
   4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 
   6  * This file contains Original Code and/or Modifications of Original Code 
   7  * as defined in and that are subject to the Apple Public Source License 
   8  * Version 2.0 (the 'License'). You may not use this file except in 
   9  * compliance with the License. The rights granted to you under the License 
  10  * may not be used to create, or enable the creation or redistribution of, 
  11  * unlawful or unlicensed copies of an Apple operating system, or to 
  12  * circumvent, violate, or enable the circumvention or violation of, any 
  13  * terms of an Apple operating system software license agreement. 
  15  * Please obtain a copy of the License at 
  16  * http://www.opensource.apple.com/apsl/ and read it before using this file. 
  18  * The Original Code and all software distributed under the License are 
  19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
  20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
  21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
  22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
  23  * Please see the License for the specific language governing rights and 
  24  * limitations under the License. 
  26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 
  28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ 
  30  * Copyright (c) 1982, 1986, 1989, 1991, 1993 
  31  *      The Regents of the University of California.  All rights reserved. 
  32  * (c) UNIX System Laboratories, Inc. 
  33  * All or some portions of this file are derived from material licensed 
  34  * to the University of California by American Telephone and Telegraph 
  35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with 
  36  * the permission of UNIX System Laboratories, Inc. 
  38  * Redistribution and use in source and binary forms, with or without 
  39  * modification, are permitted provided that the following conditions 
  41  * 1. Redistributions of source code must retain the above copyright 
  42  *    notice, this list of conditions and the following disclaimer. 
  43  * 2. Redistributions in binary form must reproduce the above copyright 
  44  *    notice, this list of conditions and the following disclaimer in the 
  45  *    documentation and/or other materials provided with the distribution. 
  46  * 3. All advertising materials mentioning features or use of this software 
  47  *    must display the following acknowledgement: 
  48  *      This product includes software developed by the University of 
  49  *      California, Berkeley and its contributors. 
  50  * 4. Neither the name of the University nor the names of its contributors 
  51  *    may be used to endorse or promote products derived from this software 
  52  *    without specific prior written permission. 
  54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 
  55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
  56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
  57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 
  58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
  59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 
  60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 
  61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
  62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 
  63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 
  66  *      @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 
  69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce 
  70  * support for mandatory and extensible security protections.  This notice 
  71  * is included in support of clause 2.2 (b) of the Apple Public License, 
  75 #include <machine/reg.h> 
  76 #include <machine/psl.h> 
  78 #include "compat_43.h" 
  80 #include <sys/param.h> 
  81 #include <sys/systm.h> 
  82 #include <sys/ioctl.h> 
  83 #include <sys/proc_internal.h> 
  85 #include <sys/kauth.h> 
  88 #include <sys/resource.h> 
  89 #include <sys/kernel.h> 
  91 #include <sys/file_internal.h> 
  92 #include <sys/vnode_internal.h> 
  93 #include <sys/syslog.h> 
  94 #include <sys/malloc.h> 
  95 #include <sys/resourcevar.h> 
  96 #include <sys/ptrace.h> 
  98 #include <sys/aio_kern.h> 
  99 #include <sys/sysproto.h> 
 100 #include <sys/signalvar.h> 
 101 #include <sys/kdebug.h> 
 102 #include <sys/filedesc.h>       /* fdfree */ 
 104 #include <sys/shm_internal.h>   /* shmexit */ 
 106 #include <sys/acct.h>           /* acct_process */ 
 108 #include <security/audit/audit.h> 
 109 #include <bsm/audit_kevents.h> 
 111 #include <mach/mach_types.h> 
 113 #include <kern/kern_types.h> 
 114 #include <kern/kalloc.h> 
 115 #include <kern/task.h> 
 116 #include <kern/thread.h> 
 117 #include <kern/thread_call.h> 
 118 #include <kern/sched_prim.h> 
 119 #include <kern/assert.h> 
 120 #include <sys/codesign.h> 
 122 #if VM_PRESSURE_EVENTS 
 123 #include <kern/vm_pressure.h> 
 126 #if CONFIG_MEMORYSTATUS 
 127 #include <sys/kern_memorystatus.h> 
 131 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */ 
 132 extern void (*dtrace_fasttrap_exit_ptr
)(proc_t
); 
 133 extern void (*dtrace_helpers_cleanup
)(proc_t
); 
 134 extern void dtrace_lazy_dofs_destroy(proc_t
); 
 136 #include <sys/dtrace_ptss.h> 
 140 #include <security/mac.h> 
 141 #include <sys/syscall.h> 
 144 #include <mach/mach_types.h> 
 145 #include <mach/task.h> 
 146 #include <mach/thread_act.h> 
 150 extern char init_task_failure_data
[]; 
 151 void proc_prepareexit(proc_t p
, int rv
, boolean_t perf_notify
); 
 152 void vfork_exit(proc_t p
, int rv
); 
 153 void vproc_exit(proc_t p
); 
 154 __private_extern__ 
void munge_user64_rusage(struct rusage 
*a_rusage_p
, struct user64_rusage 
*a_user_rusage_p
); 
 155 __private_extern__ 
void munge_user32_rusage(struct rusage 
*a_rusage_p
, struct user32_rusage 
*a_user_rusage_p
); 
 156 static int reap_child_locked(proc_t parent
, proc_t child
, int deadparent
, int reparentedtoinit
, int locked
, int droplock
); 
 159  * Things which should have prototypes in headers, but don't 
 161 void    *get_bsduthreadarg(thread_t
); 
 162 void    proc_exit(proc_t p
); 
 163 int     wait1continue(int result
); 
 164 int     waitidcontinue(int result
); 
 165 int     *get_bsduthreadrval(thread_t
); 
 166 kern_return_t 
sys_perf_notify(thread_t thread
, int pid
); 
 167 kern_return_t 
task_exception_notify(exception_type_t exception
, 
 168         mach_exception_data_type_t code
, mach_exception_data_type_t subcode
); 
 170 void gather_rusage_info_v2(proc_t p
, struct rusage_info_v2 
*ru
, int flavor
); 
 173  * NOTE: Source and target may *NOT* overlap! 
 174  * XXX Should share code with bsd/dev/ppc/unix_signal.c 
 177 siginfo_user_to_user32(user_siginfo_t 
*in
, user32_siginfo_t 
*out
) 
 179         out
->si_signo   
= in
->si_signo
; 
 180         out
->si_errno   
= in
->si_errno
; 
 181         out
->si_code    
= in
->si_code
; 
 182         out
->si_pid     
= in
->si_pid
; 
 183         out
->si_uid     
= in
->si_uid
; 
 184         out
->si_status  
= in
->si_status
; 
 185         out
->si_addr    
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_addr
); 
 186         /* following cast works for sival_int because of padding */ 
 187         out
->si_value
.sival_ptr 
= CAST_DOWN_EXPLICIT(user32_addr_t
,in
->si_value
.sival_ptr
); 
 188         out
->si_band    
= in
->si_band
;                  /* range reduction */ 
 192 siginfo_user_to_user64(user_siginfo_t 
*in
, user64_siginfo_t 
*out
) 
 194         out
->si_signo   
= in
->si_signo
; 
 195         out
->si_errno   
= in
->si_errno
; 
 196         out
->si_code    
= in
->si_code
; 
 197         out
->si_pid     
= in
->si_pid
; 
 198         out
->si_uid     
= in
->si_uid
; 
 199         out
->si_status  
= in
->si_status
; 
 200         out
->si_addr    
= in
->si_addr
; 
 201         /* following cast works for sival_int because of padding */ 
 202         out
->si_value
.sival_ptr 
= in
->si_value
.sival_ptr
; 
 203         out
->si_band    
= in
->si_band
;                  /* range reduction */ 
 207 copyoutsiginfo(user_siginfo_t 
*native
, boolean_t is64
, user_addr_t uaddr
) 
 210                 user64_siginfo_t sinfo64
; 
 212                 bzero(&sinfo64
, sizeof (sinfo64
)); 
 213                 siginfo_user_to_user64(native
, &sinfo64
); 
 214                 return (copyout(&sinfo64
, uaddr
, sizeof (sinfo64
))); 
 216                 user32_siginfo_t sinfo32
; 
 218                 bzero(&sinfo32
, sizeof (sinfo32
)); 
 219                 siginfo_user_to_user32(native
, &sinfo32
); 
 220                 return (copyout(&sinfo32
, uaddr
, sizeof (sinfo32
))); 
 229 exit(proc_t p
, struct exit_args 
*uap
, int *retval
) 
 231         exit1(p
, W_EXITCODE(uap
->rval
, 0), retval
); 
 233         /* drop funnel before we return */ 
 234         thread_exception_return(); 
 237                 thread_block(THREAD_CONTINUE_NULL
); 
 242  * Exit: deallocate address space and other resources, change proc state 
 243  * to zombie, and unlink proc from allproc and parent's lists.  Save exit 
 244  * status and rusage for wait().  Check for child processes and orphan them. 
 247 exit1(proc_t p
, int rv
, int *retval
) 
 249         return exit1_internal(p
, rv
, retval
, TRUE
, TRUE
, 0); 
 253 exit1_internal(proc_t p
, int rv
, int *retval
, boolean_t thread_can_terminate
, boolean_t perf_notify
, 
 256         thread_t self 
= current_thread(); 
 257         struct task 
*task 
= p
->task
; 
 262          * If a thread in this task has already 
 263          * called exit(), then halt any others 
 267          ut 
= get_bsdthread_info(self
); 
 268          if (ut
->uu_flag 
& UT_VFORK
) { 
 269                 if (!thread_can_terminate
) { 
 274                 vfork_return(p 
, retval
, p
->p_pid
); 
 275                 unix_syscall_return(0); 
 280          * The parameter list of audit_syscall_exit() was augmented to 
 281          * take the Darwin syscall number as the first parameter, 
 282          * which is currently required by mac_audit_postselect(). 
 286          * The BSM token contains two components: an exit status as passed 
 287          * to exit(), and a return value to indicate what sort of exit it  
 288          * was.  The exit status is WEXITSTATUS(rv), but it's not clear 
 289          * what the return value is. 
 291         AUDIT_ARG(exit
, WEXITSTATUS(rv
), 0); 
 292         AUDIT_SYSCALL_EXIT(SYS_exit
, p
, ut
, 0); /* Exit is always successfull */ 
 294         DTRACE_PROC1(exit
, int, CLD_EXITED
); 
 296         /* mark process is going to exit and pull out of DBG/disk throttle */ 
 297         /* TODO: This should be done after becoming exit thread */ 
 298         proc_set_task_policy(p
->task
, THREAD_NULL
, TASK_POLICY_ATTRIBUTE
, 
 299                              TASK_POLICY_TERMINATED
, TASK_POLICY_ENABLE
); 
 302         error 
= proc_transstart(p
, 1); 
 303         if (error 
== EDEADLK
) { 
 304                 /* Temp: If deadlock error, then it implies multithreaded exec is 
 305                  * in progress. Instread of letting exit continue and  
 306                  * corrupting the freed memory, let the exit thread 
 307                  * return. This will save corruption in remote case. 
 310                 if (current_proc() == p
){ 
 311                         if (p
->exit_thread 
== self
) 
 312                                 printf("exit_thread failed to exit, leaving process %s[%d] in unkillable limbo\n", 
 313                                        p
->p_comm
, p
->p_pid
); 
 314                         thread_exception_return(); 
 316                         /* external termination like jetsam */ 
 321         while (p
->exit_thread 
!= self
) { 
 322                 if (sig_try_locked(p
) <= 0) { 
 324                         if (get_threadtask(self
) != task
) { 
 330                         thread_terminate(self
); 
 331                         if (!thread_can_terminate
) { 
 335                         thread_exception_return(); 
 342                 printf("pid 1 exited (signal %d, exit %d)", 
 343                     WTERMSIG(rv
), WEXITSTATUS(rv
)); 
 344                 panic("%s died\nState at Last Exception:\n\n%s",  
 345                                                         (p
->p_comm
[0] != '\0' ? 
 348                                                         init_task_failure_data
); 
 351         p
->p_lflag 
|= P_LEXIT
; 
 353         p
->p_lflag 
|= jetsam_flags
; 
 358         proc_prepareexit(p
, rv
, perf_notify
); 
 360         /* Last thread to terminate will call proc_exit() */ 
 361         task_terminate_internal(task
); 
 367 proc_prepareexit(proc_t p
, int rv
, boolean_t perf_notify
)  
 369         mach_exception_data_type_t code
, subcode
; 
 371         thread_t self 
= current_thread(); 
 372         ut 
= get_bsdthread_info(self
); 
 373         struct rusage_superset 
*rup
; 
 375         /* If a core should be generated, notify crash reporter */ 
 376         if (hassigprop(WTERMSIG(rv
), SA_CORE
) || ((p
->p_csflags 
& CS_KILLED
) != 0)) { 
 378                  * Workaround for processes checking up on PT_DENY_ATTACH: 
 379                  * should be backed out post-Leopard (details in 5431025). 
 381                 if ((SIGSEGV 
== WTERMSIG(rv
)) &&  
 382                                 (p
->p_pptr
->p_lflag 
& P_LNOATTACH
)) { 
 387                  * Crash Reporter looks for the signal value, original exception 
 388                  * type, and low 20 bits of the original code in code[0]  
 389                  * (8, 4, and 20 bits respectively). code[1] is unmodified.  
 391                 code 
= ((WTERMSIG(rv
) & 0xff) << 24) | 
 392                         ((ut
->uu_exception 
& 0x0f) << 20) |  
 393                         ((int)ut
->uu_code 
& 0xfffff); 
 394                 subcode 
= ut
->uu_subcode
; 
 395                 (void) task_exception_notify(EXC_CRASH
, code
, subcode
); 
 399         /* Notify the perf server? */ 
 401                 (void)sys_perf_notify(self
, p
->p_pid
); 
 405          * Before this process becomes a zombie, stash resource usage 
 406          * stats in the proc for external observers to query 
 407          * via proc_pid_rusage(). 
 409          * If the zombie allocation fails, just punt the stats. 
 411         MALLOC_ZONE(rup
, struct rusage_superset 
*, 
 412                         sizeof (*rup
), M_ZOMBIE
, M_WAITOK
); 
 414                 gather_rusage_info_v2(p
, &rup
->ri
, RUSAGE_INFO_V2
); 
 415                 rup
->ri
.ri_phys_footprint 
= 0; 
 416                 rup
->ri
.ri_proc_exit_abstime 
= mach_absolute_time(); 
 419                  * Make the rusage_info visible to external observers 
 420                  * only after it has been completely filled in. 
 426          * Remove proc from allproc queue and from pidhash chain. 
 427          * Need to do this before we do anything that can block. 
 428          * Not doing causes things like mount() find this on allproc 
 429          * in partially cleaned state. 
 434 #if CONFIG_MEMORYSTATUS 
 435         memorystatus_remove(p
, TRUE
); 
 438         LIST_REMOVE(p
, p_list
); 
 439         LIST_INSERT_HEAD(&zombproc
, p
, p_list
); /* Place onto zombproc. */ 
 440         /* will not be visible via proc_find */ 
 441         p
->p_listflag 
|= P_LIST_EXITED
; 
 450          * If parent is waiting for us to exit or exec, 
 451          * P_LPPWAIT is set; we will wakeup the parent below. 
 454         p
->p_lflag 
&= ~(P_LTRACED 
| P_LPPWAIT
); 
 455         p
->p_sigignore 
= ~(sigcantmask
); 
 465         struct task 
*task 
= p
->task
; 
 466         vnode_t tvp 
= NULLVP
; 
 468         struct session 
*sessp
; 
 469         struct uthread 
* uth
; 
 474         uth 
= (struct uthread 
*)get_bsdthread_info(current_thread()); 
 477         proc_transstart(p
, 1); 
 478         if( !(p
->p_lflag 
& P_LEXIT
)) { 
 480                  * This can happen if a thread_terminate() occurs 
 481                  * in a single-threaded process. 
 483                 p
->p_lflag 
|= P_LEXIT
; 
 486                 proc_prepareexit(p
, 0, TRUE
);    
 487                 (void) task_terminate_internal(task
); 
 493         p
->p_lflag 
|= P_LPEXIT
; 
 496          * Other kernel threads may be in the middle of signalling this process. 
 497          * Wait for those threads to wrap it up before making the process 
 500         if ((p
->p_lflag 
& P_LINSIGNAL
) || (p
->p_sigwaitcnt 
> 0)) { 
 502                 while ((p
->p_lflag 
& P_LINSIGNAL
) || (p
->p_sigwaitcnt 
> 1))  
 503                         msleep(&p
->p_sigmask
, &p
->p_mlock
, PWAIT
, "proc_sigdrain", NULL
); 
 509         exitval 
= p
->p_xstat
; 
 510         KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON
,  
 511                 BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXIT
) | DBG_FUNC_START
, 
 512                 pid
, exitval
, 0, 0, 0); 
 516          * Free any outstanding lazy dof entries. It is imperative we 
 517          * always call dtrace_lazy_dofs_destroy, rather than null check 
 518          * and call if !NULL. If we NULL test, during lazy dof faulting 
 519          * we can race with the faulting code and proceed from here to 
 520          * beyond the helpers cleanup. The lazy dof faulting will then 
 521          * install new helpers which will never be cleaned up, and leak. 
 523         dtrace_lazy_dofs_destroy(p
); 
 526          * Clean up any DTrace helper actions or probes for the process. 
 528         if (p
->p_dtrace_helpers 
!= NULL
) { 
 529                 (*dtrace_helpers_cleanup
)(p
); 
 533          * Clean up any DTrace probes associated with this process. 
 536          * APPLE NOTE: We release ptss pages/entries in dtrace_fasttrap_exit_ptr(), 
 537          * call this after dtrace_helpers_cleanup() 
 540         if (p
->p_dtrace_probes 
&& dtrace_fasttrap_exit_ptr
) { 
 541                 (*dtrace_fasttrap_exit_ptr
)(p
); 
 548 #if VM_PRESSURE_EVENTS 
 549         vm_pressure_proc_cleanup(p
); 
 553          * need to cancel async IO requests that can be cancelled and wait for those 
 554          * already active.  MAY BLOCK! 
 559         /* if any pending cpu limits action, clear it */ 
 560         task_clear_cpuusage(p
->task
, TRUE
); 
 562         workqueue_mark_exiting(p
); 
 568          * Close open files and release open-file table. 
 573         if (uth
->uu_lowpri_window
) { 
 575                  * task is marked as a low priority I/O type 
 576                  * and the I/O we issued while in flushing files on close 
 577                  * collided with normal I/O operations... 
 578                  * no need to throttle this thread since its going away 
 579                  * but we do need to update our bookeeping w/r to throttled threads 
 581                 throttle_lowpri_io(0); 
 585         /* Close ref SYSV Shared memory*/ 
 590         /* Release SYSV semaphores */ 
 595         pth_proc_hashdelete(p
); 
 598         sessp 
= proc_session(p
); 
 599         if (SESS_LEADER(p
, sessp
)) { 
 601                 if (sessp
->s_ttyvp 
!= NULLVP
) { 
 605                         struct vfs_context context
; 
 609                          * Controlling process. 
 610                          * Signal foreground pgrp, 
 611                          * drain controlling terminal 
 612                          * and revoke access to controlling terminal. 
 615                         tp 
= SESSION_TP(sessp
); 
 616                         if ((tp 
!= TTY_NULL
) && (tp
->t_session 
== sessp
)) { 
 617                                 session_unlock(sessp
); 
 619                                 tty_pgsignal(tp
, SIGHUP
, 1); 
 622                                 tp 
= SESSION_TP(sessp
); 
 624                         cttyflag 
= sessp
->s_flags 
& S_CTTYREF
; 
 625                         sessp
->s_flags 
&= ~S_CTTYREF
; 
 626                         ttyvp 
= sessp
->s_ttyvp
; 
 627                         ttyvid 
= sessp
->s_ttyvid
; 
 628                         sessp
->s_ttyvp 
= NULLVP
; 
 630                         sessp
->s_ttyp 
= TTY_NULL
; 
 631                         sessp
->s_ttypgrpid 
= NO_PID
; 
 632                         session_unlock(sessp
); 
 634                         if ((ttyvp 
!= NULLVP
) && (vnode_getwithvid(ttyvp
, ttyvid
) == 0)) { 
 635                                 if (tp 
!= TTY_NULL
) { 
 640                                 context
.vc_thread 
= proc_thread(p
); /* XXX */ 
 641                                 context
.vc_ucred 
= kauth_cred_proc_ref(p
); 
 643                                 VNOP_REVOKE(ttyvp
, REVOKEALL
, &context
); 
 646                                          * Release the extra usecount taken in cttyopen. 
 647                                          * usecount should be released after VNOP_REVOKE is called. 
 652                                 kauth_cred_unref(&context
.vc_ucred
); 
 661                 sessp
->s_leader 
= NULL
; 
 662                 session_unlock(sessp
); 
 670         p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur 
= RLIM_INFINITY
; 
 671         (void)acct_process(p
); 
 675         if ((p
->p_listflag 
& P_LIST_EXITCOUNT
) == P_LIST_EXITCOUNT
) { 
 676                 p
->p_listflag 
&= ~P_LIST_EXITCOUNT
; 
 677                 proc_shutdown_exitcount
--; 
 678                 if (proc_shutdown_exitcount 
== 0) 
 679                         wakeup(&proc_shutdown_exitcount
); 
 682         /* wait till parentrefs are dropped and grant no more */ 
 683         proc_childdrainstart(p
); 
 684         while ((q 
= p
->p_children
.lh_first
) != NULL
) { 
 685                 int reparentedtoinit 
= (q
->p_listflag 
& P_LIST_DEADPARENT
) ? 1 : 0; 
 686                 if (q
->p_stat 
== SZOMB
) { 
 688                                 panic("parent child linkage broken"); 
 689                         /* check for sysctl zomb lookup */ 
 690                         while ((q
->p_listflag 
& P_LIST_WAITING
) == P_LIST_WAITING
) { 
 691                                 msleep(&q
->p_stat
, proc_list_mlock
, PWAIT
, "waitcoll", 0); 
 693                         q
->p_listflag 
|= P_LIST_WAITING
; 
 695                          * This is a named reference and it is not granted 
 696                          * if the reap is already in progress. So we get 
 697                          * the reference here exclusively and their can be 
 698                          * no waiters. So there is no need for a wakeup 
 699                          * after we are done.  Also the reap frees the structure 
 700                          * and the proc struct cannot be used for wakeups as well.  
 701                          * It is safe to use q here as this is system reap 
 703                         (void)reap_child_locked(p
, q
, 1, reparentedtoinit
, 1, 0); 
 706                         * Traced processes are killed 
 707                         * since their existence means someone is messing up. 
 709                         if (q
->p_lflag 
& P_LTRACED
) { 
 713                                  * Take a reference on the child process to 
 714                                  * ensure it doesn't exit and disappear between 
 715                                  * the time we drop the list_lock and attempt 
 716                                  * to acquire its proc_lock. 
 718                                 if (proc_ref_locked(q
) != q
) 
 723                                 opp 
= proc_find(q
->p_oppid
); 
 724                                 if (opp 
!= PROC_NULL
) { 
 728                                         proc_reparentlocked(q
, opp
, 0, 0); 
 731                                         /* original parent exited while traced */ 
 733                                         q
->p_listflag 
|= P_LIST_DEADPARENT
; 
 736                                         proc_reparentlocked(q
, initproc
, 0, 0); 
 740                                 q
->p_lflag 
&= ~P_LTRACED
; 
 742                                 if (q
->sigwait_thread
) { 
 743                                         thread_t thread 
= q
->sigwait_thread
; 
 747                                         * The sigwait_thread could be stopped at a 
 748                                         * breakpoint. Wake it up to kill. 
 749                                         * Need to do this as it could be a thread which is not 
 750                                         * the first thread in the task. So any attempts to kill 
 751                                         * the process would result into a deadlock on q->sigwait. 
 753                                         thread_resume(thread
); 
 754                                         clear_wait(thread
, THREAD_INTERRUPTED
); 
 755                                         threadsignal(thread
, SIGKILL
, 0); 
 764                                 q
->p_listflag 
|= P_LIST_DEADPARENT
; 
 765                                 proc_reparentlocked(q
, initproc
, 0, 1); 
 770         proc_childdrainend(p
); 
 774          * Release reference to text vnode 
 783          * Save exit status and final rusage info, adding in child rusage 
 784          * info and self times.  If we were unable to allocate a zombie 
 785          * structure, this information is lost. 
 787         if (p
->p_ru 
!= NULL
) { 
 788             calcru(p
, &p
->p_stats
->p_ru
.ru_utime
, &p
->p_stats
->p_ru
.ru_stime
, NULL
); 
 789             p
->p_ru
->ru 
= p
->p_stats
->p_ru
; 
 791             ruadd(&(p
->p_ru
->ru
), &p
->p_stats
->p_cru
); 
 795          * Free up profiling buffers. 
 798                 struct uprof 
*p0 
= &p
->p_stats
->p_prof
, *p1
, *pn
; 
 804                 for (; p1 
!= NULL
; p1 
= pn
) { 
 806                         kfree(p1
, sizeof *p1
); 
 811         if (thread_call_cancel(p
->p_rcall
)) 
 814         while (p
->p_ractive 
> 0) { 
 823         thread_call_free(p
->p_rcall
); 
 827          * Other substructures are freed from wait(). 
 829         FREE_ZONE(p
->p_stats
, sizeof *p
->p_stats
, M_PSTATS
); 
 832         FREE_ZONE(p
->p_sigacts
, sizeof *p
->p_sigacts
, M_SIGACTS
); 
 835         proc_limitdrop(p
, 1); 
 840          * Finish up by terminating the task 
 841          * and halt this thread (only if a 
 842          * member of the task exiting). 
 845         set_bsdtask_info(task
, NULL
); 
 847         knote_hint 
= NOTE_EXIT 
| (p
->p_xstat 
& 0xffff); 
 848         proc_knote(p
, knote_hint
); 
 850         /* mark the thread as the one that is doing proc_exit 
 851          * no need to hold proc lock in uthread_free 
 853         uth
->uu_flag 
|= UT_PROCEXIT
; 
 855          * Notify parent that we're gone. 
 858         if (pp
->p_flag 
& P_NOCLDWAIT
) { 
 860                 if (p
->p_ru 
!= NULL
) { 
 864                  * If the parent is ignoring SIGCHLD, then POSIX requires 
 865                  * us to not add the resource usage to the parent process - 
 866                  * we are only going to hand it off to init to get reaped. 
 867                  * We should contest the standard in this case on the basis 
 872                  * Add child resource usage to parent before giving 
 873                  * zombie to init.  If we were unable to allocate a 
 874                  * zombie structure, this information is lost. 
 876                         ruadd(&pp
->p_stats
->p_cru
, &p
->p_ru
->ru
); 
 877 #endif  /* !3839178 */ 
 878                         update_rusage_info_child(&pp
->p_stats
->ri_child
, &p
->p_ru
->ri
); 
 882                 /* kernel can reap this one, no need to move it to launchd */ 
 884                 p
->p_listflag 
|= P_LIST_DEADPARENT
; 
 887         if ((p
->p_listflag 
& P_LIST_DEADPARENT
) == 0 || p
->p_oppid
) { 
 888                 if (pp 
!= initproc
) { 
 890                         pp
->si_pid 
= p
->p_pid
; 
 891                         pp
->si_status 
= p
->p_xstat
; 
 892                         pp
->si_code 
= CLD_EXITED
; 
 894                          * p_ucred usage is safe as it is an exiting process 
 895                          * and reference is dropped in reap 
 897                         pp
->si_uid 
= kauth_cred_getruid(p
->p_ucred
); 
 900                 /* mark as a zombie */ 
 901                 /* No need to take proc lock as all refs are drained and 
 902                  * no one except parent (reaping ) can look at this. 
 903                  * The write is to an int and is coherent. Also parent is 
 904                  *  keyed off of list lock for reaping 
 906                 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON
, 
 907                         BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXIT
) | DBG_FUNC_END
, 
 908                         pid
, exitval
, 0, 0, 0); 
 911                  * The current process can be reaped so, no one 
 915                 psignal(pp
, SIGCHLD
); 
 917                 /* and now wakeup the parent */ 
 922                 /* should be fine as parent proc would be initproc */ 
 923                 /* mark as a zombie */ 
 924                 /* No need to take proc lock as all refs are drained and 
 925                  * no one except parent (reaping ) can look at this. 
 926                  * The write is to an int and is coherent. Also parent is 
 927                  *  keyed off of list lock for reaping 
 930                 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON
, 
 931                         BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXIT
) | DBG_FUNC_END
, 
 932                         pid
, exitval
, 0, 0, 0); 
 933                 /* check for sysctl zomb lookup */ 
 934                 while ((p
->p_listflag 
& P_LIST_WAITING
) == P_LIST_WAITING
) { 
 935                         msleep(&p
->p_stat
, proc_list_mlock
, PWAIT
, "waitcoll", 0); 
 937                 /* safe to use p as this is a system reap */ 
 939                 p
->p_listflag 
|= P_LIST_WAITING
; 
 942                  * This is a named reference and it is not granted 
 943                  * if the reap is already in progress. So we get 
 944                  * the reference here exclusively and their can be 
 945                  * no waiters. So there is no need for a wakeup 
 946                  * after we are done. AlsO  the reap frees the structure 
 947                  * and the proc struct cannot be used for wakeups as well.  
 948                  * It is safe to use p here as this is system reap 
 950                 (void)reap_child_locked(pp
, p
, 1, 0, 1, 1); 
 951                 /* list lock dropped by reap_child_locked */ 
 953         if (uth
->uu_lowpri_window
) { 
 955                  * task is marked as a low priority I/O type and we've 
 956                  * somehow picked up another throttle during exit processing... 
 957                  * no need to throttle this thread since its going away 
 958                  * but we do need to update our bookeeping w/r to throttled threads 
 960                 throttle_lowpri_io(0); 
 971  * Description: Given a process from which all status information needed 
 972  *              has already been extracted, if the process is a ptrace 
 973  *              attach process, detach it and give it back to its real 
 974  *              parent, else recover all resources remaining associated 
 977  * Parameters:  proc_t parent           Parent of process being reaped 
 978  *              proc_t child            Process to reap 
 980  * Returns:     0                       Process was not reaped because it 
 981  *                                      came from an attach 
 982  *              1                       Process was reaped 
/*
 * NOTE(review): this chunk is a line-mangled extraction; statements are split
 * across lines and some interior source lines are missing.  Code text is left
 * byte-identical below; only comments are added/improved.
 */
 985 reap_child_locked(proc_t parent
, proc_t child
, int deadparent
, int reparentedtoinit
, int locked
, int droplock
) 
 987         proc_t trace_parent 
= PROC_NULL
;        /* Traced parent process, if tracing */ 
 993          * If we got the child via a ptrace 'attach', 
 994          * we need to give it back to the old parent. 
 996          * Exception: someone who has been reparented to launchd before being 
 997          * ptraced can simply be reaped, refer to radar 5677288 
 999          *      trace_parent == initproc -> away from launchd 
1000          *      reparentedtoinit         -> came to launchd by reparenting 
/* p_oppid != 0 means the child was ptrace-attached; try to hand it back. */
1002         if (child
->p_oppid
) { 
1007                 oppid 
= child
->p_oppid
; 
1009                 knote_hint 
= NOTE_EXIT 
| (child
->p_xstat 
& 0xffff); 
1012                 if ((trace_parent 
= proc_find(oppid
)) 
1013                         && !((trace_parent 
== initproc
) && reparentedtoinit
)) { 
1015                         if (trace_parent 
!= initproc
) { 
1017                                  * proc internal fields and p_ucred usage safe  
1018                                  * here as child is dead and is not reaped or  
/* Fill the tracing parent's siginfo fields so its SIGCHLD carries status. */
1021                                 proc_lock(trace_parent
); 
1022                                 trace_parent
->si_pid 
= child
->p_pid
; 
1023                                 trace_parent
->si_status 
= child
->p_xstat
; 
1024                                 trace_parent
->si_code 
= CLD_CONTINUED
; 
1025                                 trace_parent
->si_uid 
= kauth_cred_getruid(child
->p_ucred
); 
1026                                 proc_unlock(trace_parent
); 
/* Give the zombie back to the original (tracing) parent instead of reaping. */
1028                         proc_reparentlocked(child
, trace_parent
, 1, 0); 
1030                         /* resend knote to original parent (and others) after reparenting */ 
1031                         proc_knote(child
, knote_hint
); 
1033                         psignal(trace_parent
, SIGCHLD
); 
1035                         wakeup((caddr_t
)trace_parent
); 
1036                         child
->p_listflag 
&= ~P_LIST_WAITING
; 
1037                         wakeup(&child
->p_stat
); 
1039                         proc_rele(trace_parent
); 
1040                         if ((locked 
== 1) && (droplock 
== 0)) 
1046                  * If we can't reparent (e.g. the original parent exited while child was being debugged, or 
1047                  * original parent is the same as the debugger currently exiting), we still need to satisfy 
1048                  * the knote lifecycle for other observers on the system. While the debugger was attached, 
1049                  * the NOTE_EXIT would not have been broadcast during initial child termination. 
1051                 proc_knote(child
, knote_hint
); 
1053                 if (trace_parent 
!= PROC_NULL
) { 
1054                         proc_rele(trace_parent
); 
1058 #pragma clang diagnostic push 
1059 #pragma clang diagnostic ignored "-Wdeprecated-declarations" 
1060         proc_knote(child
, NOTE_REAP
); 
1061 #pragma clang diagnostic pop 
1063         proc_knote_drain(child
); 
1070                  * If the parent is ignoring SIGCHLD, then POSIX requires 
1071                  * us to not add the resource usage to the parent process - 
1072                  * we are only going to hand it off to init to get reaped. 
1073                  * We should contest the standard in this case on the basis 
1076                 if (!(parent
->p_flag 
& P_NOCLDWAIT
)) 
1077 #endif  /* 3839178 */ 
/* Fold the child's accumulated rusage into the parent's child-rusage totals. */
1078                         ruadd(&parent
->p_stats
->p_cru
, &child
->p_ru
->ru
); 
1079                 update_rusage_info_child(&parent
->p_stats
->ri_child
, &child
->p_ru
->ri
); 
1080                 proc_unlock(parent
); 
1081                 FREE_ZONE(child
->p_ru
, sizeof *child
->p_ru
, M_ZOMBIE
); 
1084                 printf("Warning : lost p_ru for %s\n", child
->p_comm
); 
1087         AUDIT_SESSION_PROCEXIT(child
); 
1090          * Decrement the count of procs running with this uid. 
1091          * p_ucred usage is safe here as it is an exited process. 
1092          * and reference is dropped after these calls down below 
1093          * (locking protection is provided by list lock held in chgproccnt) 
1095         (void)chgproccnt(kauth_cred_getruid(child
->p_ucred
), -1); 
1104          * Free up credentials. 
1106         if (IS_VALID_CRED(child
->p_ucred
)) { 
1107                 kauth_cred_unref(&child
->p_ucred
); 
1110         /*  XXXX Note NOT SAFE TO USE p_ucred from this point onwards */ 
1113          * Finally finished with old proc entry. 
1114          * Unlink it from its process group and free it. 
1119         LIST_REMOVE(child
, p_list
);     /* off zombproc */ 
1120         parent
->p_childrencnt
--; 
1121         LIST_REMOVE(child
, p_sibling
); 
1122         /* If there are no more children wakeup parent */ 
1123         if ((deadparent 
!= 0) && (LIST_EMPTY(&parent
->p_children
))) 
1124                 wakeup((caddr_t
)parent
);        /* with list lock held */ 
1125         child
->p_listflag 
&= ~P_LIST_WAITING
; 
1126         wakeup(&child
->p_stat
); 
1128         /* Take it out of process hash */ 
1129         LIST_REMOVE(child
, p_hash
); 
1130         child
->p_listflag 
&= ~P_LIST_INHASH
; 
1131         proc_checkdeadrefs(child
); 
1136                  * If a child zombie is being reaped because its parent 
1137                  * is exiting, make sure we update the list flag 
1139                 child
->p_listflag 
|= P_LIST_DEADPARENT
; 
/* Tear down the per-proc locks before the proc structure itself is freed. */
1144 #if CONFIG_FINE_LOCK_GROUPS 
1145         lck_mtx_destroy(&child
->p_mlock
, proc_mlock_grp
); 
1146         lck_mtx_destroy(&child
->p_fdmlock
, proc_fdmlock_grp
); 
1148         lck_mtx_destroy(&child
->p_dtrace_sprlock
, proc_lck_grp
); 
1150         lck_spin_destroy(&child
->p_slock
, proc_slock_grp
); 
1151 #else /* CONFIG_FINE_LOCK_GROUPS */ 
1152         lck_mtx_destroy(&child
->p_mlock
, proc_lck_grp
); 
1153         lck_mtx_destroy(&child
->p_fdmlock
, proc_lck_grp
); 
1155         lck_mtx_destroy(&child
->p_dtrace_sprlock
, proc_lck_grp
); 
1157         lck_spin_destroy(&child
->p_slock
, proc_lck_grp
); 
1158 #endif /* CONFIG_FINE_LOCK_GROUPS */ 
1159         workqueue_destroy_lock(child
); 
1161         FREE_ZONE(child
, sizeof *child
, M_PROC
); 
1162         if ((locked 
== 1) && (droplock 
== 0)) 
/*
 * wait1continue
 *
 * Continuation routine re-entered after msleep0() in wait4_nocancel();
 * recovers the syscall argument block and return-value slot from the
 * current thread and restarts wait4().
 * NOTE(review): the declarations and the early-return-on-error guard that
 * precede these statements were dropped by the extraction.
 */
1170 wait1continue(int result
) 
1181         thread 
= current_thread(); 
1182         vt 
= get_bsduthreadarg(thread
); 
1183         retval 
= get_bsduthreadrval(thread
); 
1184         return(wait4(p
, (struct wait4_args 
*)vt
, retval
)); 
1188 wait4(proc_t q
, struct wait4_args 
*uap
, int32_t *retval
) 
1190         __pthread_testcancel(1); 
1191         return(wait4_nocancel(q
, (struct wait4_nocancel_args 
*)uap
, retval
)); 
/*
 * wait4_nocancel
 *
 * Core of the wait4() system call: scan the caller's children for one
 * matching uap->pid whose state (zombie / stopped / continued) satisfies
 * the request, copy status and rusage out to user space, and reap zombies.
 * NOTE(review): mangled extraction — statements are split across lines and
 * interior source lines are missing; code text left byte-identical.
 */
1195 wait4_nocancel(proc_t q
, struct wait4_nocancel_args 
*uap
, int32_t *retval
) 
1202         AUDIT_ARG(pid
, uap
->pid
); 
/* pid == WAIT_MYPGRP: wait for any child in the caller's process group. */
1205                 uap
->pid 
= -q
->p_pgrpid
; 
1213         for (p 
= q
->p_children
.lh_first
; p 
!= 0; p 
= p
->p_sibling
.le_next
) { 
1214                 if ( p
->p_sibling
.le_next 
!= 0 ) 
/* Skip children that match neither the requested pid nor pgrp. */
1216                 if (uap
->pid 
!= WAIT_ANY 
&& 
1217                     p
->p_pid 
!= uap
->pid 
&& 
1218                     p
->p_pgrpid 
!= -(uap
->pid
)) 
1223                 /* XXX This is racy because we don't get the lock!!!! */ 
1225                 if (p
->p_listflag 
& P_LIST_WAITING
) { 
1226                         (void)msleep(&p
->p_stat
, proc_list_mlock
, PWAIT
, "waitcoll", 0); 
1229                 p
->p_listflag 
|= P_LIST_WAITING
;   /* only allow single thread to wait() */ 
1232                 if (p
->p_stat 
== SZOMB
) { 
1233                         int reparentedtoinit 
= (p
->p_listflag 
& P_LIST_DEADPARENT
) ? 1 : 0; 
1237                         if ((error 
= mac_proc_check_wait(q
, p
)) != 0) 
1240                         retval
[0] = p
->p_pid
; 
1242                                 /* Legacy apps expect only 8 bits of status */ 
1243                                 status 
= 0xffff & p
->p_xstat
;   /* convert to int */ 
1244                                 error 
= copyout((caddr_t
)&status
, 
1251                                 if (p
->p_ru 
== NULL
) { 
/* Copy rusage out in the layout matching the caller's ABI (64 vs 32 bit). */
1254                                         if (IS_64BIT_PROCESS(q
)) { 
1255                                                 struct user64_rusage    my_rusage
; 
1256                                                 munge_user64_rusage(&p
->p_ru
->ru
, &my_rusage
); 
1257                                                 error 
= copyout((caddr_t
)&my_rusage
, 
1259                                                         sizeof (my_rusage
)); 
1262                                                 struct user32_rusage    my_rusage
; 
1263                                                 munge_user32_rusage(&p
->p_ru
->ru
, &my_rusage
); 
1264                                                 error 
= copyout((caddr_t
)&my_rusage
, 
1266                                                         sizeof (my_rusage
)); 
1269                                 /* information unavailable? */ 
1274                         /* Conformance change for 6577252. 
1275                          * When SIGCHLD is blocked and wait() returns because the status 
1276                          * of a child process is available and there are no other  
1277                          * children processes, then any pending SIGCHLD signal is cleared. 
1279                         if ( sibling_count 
== 0 ) { 
1280                                 int mask 
= sigmask(SIGCHLD
); 
1281                                 uthread_t uth 
= (struct uthread 
*)get_bsdthread_info(current_thread()); 
1283                                 if ( (uth
->uu_sigmask 
& mask
) != 0 ) { 
1284                                         /* we are blocking SIGCHLD signals.  clear any pending SIGCHLD. 
1285                                          * This locking looks funny but it is protecting access to the  
1286                                          * thread via p_uthlist. 
1289                                         uth
->uu_siglist 
&= ~mask
;       /* clear pending signal */ 
/* Zombie status delivered: reap it now (list lock is held). */
1295                         (void)reap_child_locked(q
, p
, 0, reparentedtoinit
, 0, 0); 
1299                 if (p
->p_stat 
== SSTOP 
&& (p
->p_lflag 
& P_LWAITED
) == 0 && 
1300                     (p
->p_lflag 
& P_LTRACED 
|| uap
->options 
& WUNTRACED
)) { 
1303                         if ((error 
= mac_proc_check_wait(q
, p
)) != 0) 
1307                         p
->p_lflag 
|= P_LWAITED
; 
1309                         retval
[0] = p
->p_pid
; 
1311                                 status 
= W_STOPCODE(p
->p_xstat
); 
1312                                 error 
= copyout((caddr_t
)&status
, 
1320                  * If we are waiting for continued processes, and this 
1321                  * process was continued 
1323                 if ((uap
->options 
& WCONTINUED
) && 
1324                     (p
->p_flag 
& P_CONTINUED
)) { 
1327                         if ((error 
= mac_proc_check_wait(q
, p
)) != 0) 
1331                         /* Prevent other process for waiting for this event */ 
1332                         OSBitAndAtomic(~((uint32_t)P_CONTINUED
), &p
->p_flag
); 
1333                         retval
[0] = p
->p_pid
; 
1335                                 status 
= W_STOPCODE(SIGCONT
); 
1336                                 error 
= copyout((caddr_t
)&status
, 
1343                 p
->p_listflag 
&= ~P_LIST_WAITING
; 
1346         /* list lock is held when we get here any which way */ 
1352         if (uap
->options 
& WNOHANG
) { 
/* Block (with continuation) until a child changes state, then restart. */
1358         if ((error 
= msleep0((caddr_t
)q
, proc_list_mlock
, PWAIT 
| PCATCH 
| PDROP
, "wait", 0, wait1continue
))) 
1364         p
->p_listflag 
&= ~P_LIST_WAITING
; 
/*
 * ASSERT_LCK_MTX_OWNED(lock): assert the caller holds 'lock'.
 * NOTE(review): the two same-name definitions below were originally
 * separated by #if DEBUG / #else / #endif lines dropped by the extraction;
 * the second is the non-debug no-op variant.
 */
1371 #define ASSERT_LCK_MTX_OWNED(lock)      \ 
1372                                 lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED) 
/* Non-debug builds: the assertion compiles away. */
1374 #define ASSERT_LCK_MTX_OWNED(lock)      /* nothing */ 
/*
 * waitidcontinue
 *
 * Continuation routine re-entered after msleep0() in waitid_nocancel();
 * recovers the syscall argument block and return-value slot from the
 * current thread and restarts waitid().
 * NOTE(review): the declarations and early-return-on-error guard preceding
 * these statements were dropped by the extraction.
 */
1378 waitidcontinue(int result
) 
1387         thread 
= current_thread(); 
1388         vt 
= get_bsduthreadarg(thread
); 
1389         retval 
= get_bsduthreadrval(thread
); 
1390         return (waitid(current_proc(), (struct waitid_args 
*)vt
, retval
)); 
1394  * Description: Suspend the calling thread until one child of the process 
1395  *              containing the calling thread changes state. 
1397  * Parameters:  uap->idtype             one of P_PID, P_PGID, P_ALL 
1398  *              uap->id                 pid_t or gid_t or ignored 
1399  *              uap->infop              Address of siginfo_t struct in 
1400  *                                      user space into which to return status 
1401  *              uap->options            flag values 
1403  * Returns:     0                       Success 
1404  *              !0                      Error returning status to user space 
1407 waitid(proc_t q
, struct waitid_args 
*uap
, int32_t *retval
) 
1409         __pthread_testcancel(1); 
1410         return (waitid_nocancel(q
, (struct waitid_nocancel_args 
*)uap
, retval
)); 
/*
 * waitid_nocancel
 *
 * Core of the waitid() system call: validate options, scan the caller's
 * children for one matching idtype/id, build a user_siginfo_t describing
 * the state change (exited / stopped / continued), copy it out, and —
 * unless WNOWAIT was given — consume the event (reaping zombies).
 * NOTE(review): mangled extraction — statements split across lines and
 * interior source lines missing; code text left byte-identical.
 */
1414 waitid_nocancel(proc_t q
, struct waitid_nocancel_args 
*uap
, 
1415         __unused 
int32_t *retval
) 
1417         user_siginfo_t  siginfo
;        /* siginfo data to return to caller */ 
1418         boolean_t caller64 
= IS_64BIT_PROCESS(q
); 
/* Reject an empty or unrecognized options mask up front. */
1423         if (uap
->options 
== 0 || 
1424             (uap
->options 
& ~(WNOHANG
|WNOWAIT
|WCONTINUED
|WSTOPPED
|WEXITED
))) 
1425                 return (EINVAL
);        /* bits set that aren't recognized */ 
1427         switch (uap
->idtype
) { 
1428         case P_PID
:     /* child with process ID equal to... */ 
1429         case P_PGID
:    /* child with process group ID equal to... */ 
1430                 if (((int)uap
->id
) < 0) 
1433         case P_ALL
:     /* any child */ 
1441         for (p 
= q
->p_children
.lh_first
; p 
!= 0; p 
= p
->p_sibling
.le_next
) { 
1443                 switch (uap
->idtype
) { 
1444                 case P_PID
:     /* child with process ID equal to... */ 
1445                         if (p
->p_pid 
!= (pid_t
)uap
->id
) 
1448                 case P_PGID
:    /* child with process group ID equal to... */ 
1449                         if (p
->p_pgrpid 
!= (pid_t
)uap
->id
) 
1452                 case P_ALL
:     /* any child */ 
1456                 /* XXX This is racy because we don't get the lock!!!! */ 
1459                  * Wait collision; go to sleep and restart; used to maintain 
1460                  * the single return for waited process guarantee. 
1462                 if (p
->p_listflag 
& P_LIST_WAITING
) { 
1463                         (void) msleep(&p
->p_stat
, proc_list_mlock
, 
1464                                 PWAIT
, "waitidcoll", 0); 
1467                 p
->p_listflag 
|= P_LIST_WAITING
;                /* mark busy */ 
1471                 bzero(&siginfo
, sizeof (siginfo
)); 
1473                 switch (p
->p_stat
) { 
1474                 case SZOMB
:             /* Exited */ 
1475                         if (!(uap
->options 
& WEXITED
)) 
1479                         if ((error 
= mac_proc_check_wait(q
, p
)) != 0) 
1482                         siginfo
.si_signo 
= SIGCHLD
; 
1483                         siginfo
.si_pid 
= p
->p_pid
; 
1484                         siginfo
.si_status 
= WEXITSTATUS(p
->p_xstat
); 
/* Distinguish signal-death (with/without core) from normal exit. */
1485                         if (WIFSIGNALED(p
->p_xstat
)) { 
1486                                 siginfo
.si_code 
= WCOREDUMP(p
->p_xstat
) ? 
1487                                         CLD_DUMPED 
: CLD_KILLED
; 
1489                                 siginfo
.si_code 
= CLD_EXITED
; 
1491                         if ((error 
= copyoutsiginfo(&siginfo
, 
1492                             caller64
, uap
->infop
)) != 0) 
1495                         /* Prevent other process for waiting for this event? */ 
1496                         if (!(uap
->options 
& WNOWAIT
)) { 
1497                                 (void) reap_child_locked(q
, p
, 0, 0, 0, 0); 
1502                 case SSTOP
:             /* Stopped */ 
1504                          * If we are not interested in stopped processes, then 
1507                         if (!(uap
->options 
& WSTOPPED
)) 
1511                          * If someone has already waited it, we lost a race 
1512                          * to be the one to return status. 
1514                         if ((p
->p_lflag 
& P_LWAITED
) != 0) 
1518                         if ((error 
= mac_proc_check_wait(q
, p
)) != 0) 
1521                         siginfo
.si_signo 
= SIGCHLD
; 
1522                         siginfo
.si_pid 
= p
->p_pid
; 
1523                         siginfo
.si_status 
= p
->p_xstat
; /* signal number */ 
1524                         siginfo
.si_code 
= CLD_STOPPED
; 
1526                         if ((error 
= copyoutsiginfo(&siginfo
, 
1527                             caller64
, uap
->infop
)) != 0) 
1530                         /* Prevent other process for waiting for this event? */ 
1531                         if (!(uap
->options 
& WNOWAIT
)) { 
1533                                 p
->p_lflag 
|= P_LWAITED
; 
1538                 default:                /* All other states => Continued */ 
1539                         if (!(uap
->options 
& WCONTINUED
)) 
1543                          * If the flag isn't set, then this process has not 
1544                          * been stopped and continued, or the status has 
1545                          * already been reaped by another caller of waitid(). 
1547                         if ((p
->p_flag 
& P_CONTINUED
) == 0) 
1551                         if ((error 
= mac_proc_check_wait(q
, p
)) != 0) 
1554                         siginfo
.si_signo 
= SIGCHLD
; 
1555                         siginfo
.si_code 
= CLD_CONTINUED
; 
1557                         siginfo
.si_pid 
= p
->p_contproc
; 
1558                         siginfo
.si_status 
= p
->p_xstat
; 
1561                         if ((error 
= copyoutsiginfo(&siginfo
, 
1562                             caller64
, uap
->infop
)) != 0) 
1565                         /* Prevent other process for waiting for this event? */ 
1566                         if (!(uap
->options 
& WNOWAIT
)) { 
1567                                 OSBitAndAtomic(~((uint32_t)P_CONTINUED
), 
1572                 ASSERT_LCK_MTX_OWNED(proc_list_mlock
); 
1574                 /* Not a process we are interested in; go on to next child */ 
1576                 p
->p_listflag 
&= ~P_LIST_WAITING
; 
1579         ASSERT_LCK_MTX_OWNED(proc_list_mlock
); 
1581         /* No child processes that could possibly satisfy the request? */ 
1588         if (uap
->options 
& WNOHANG
) { 
1591                 if ((error 
= mac_proc_check_wait(q
, p
)) != 0) 
1595                  * The state of the siginfo structure in this case 
1596                  * is undefined.  Some implementations bzero it, some 
1597                  * (like here) leave it untouched for efficiency. 
1599                  * Thus the most portable check for "no matching pid with 
1600                  * WNOHANG" is to store a zero into si_pid before 
1601                  * invocation, then check for a non-zero value afterwards. 
/* Block (with continuation) until a child changes state, then restart. */
1606         if ((error 
= msleep0(q
, proc_list_mlock
, 
1607             PWAIT 
| PCATCH 
| PDROP
, "waitid", 0, waitidcontinue
)) != 0) 
1613         p
->p_listflag 
&= ~P_LIST_WAITING
; 
1620  * make process 'parent' the new parent of process 'child'. 
/*
 * Moves 'child' from its old parent's children list to 'parent's, updating
 * both children counts and child->p_pptr/p_ppid.  If 'cansignal' is set and
 * the new parent is launchd/init with a zombie child, SIGCHLD is sent so the
 * zombie gets reaped promptly.
 * NOTE(review): mangled extraction; code text left byte-identical.
 */
1623 proc_reparentlocked(proc_t child
, proc_t parent
, int cansignal
, int locked
) 
1625         proc_t oldparent 
= PROC_NULL
; 
/* Already the parent: nothing to do. */
1627         if (child
->p_pptr 
== parent
) 
1633         oldparent 
= child
->p_pptr
; 
1634 #if __PROC_INTERNAL_DEBUG 
1635         if (oldparent 
== PROC_NULL
) 
1636                 panic("proc_reparent: process %p does not have a parent\n", child
); 
1639         LIST_REMOVE(child
, p_sibling
); 
1640 #if __PROC_INTERNAL_DEBUG 
1641         if (oldparent
->p_childrencnt 
== 0) 
1642                 panic("process children count already 0\n"); 
1644         oldparent
->p_childrencnt
--; 
/*
 * NOTE(review): the "__PROC_INTERNAL_DEBUG1" suffix below looks like a
 * transcription artifact of __PROC_INTERNAL_DEBUG used on the two guards
 * above — verify against the upstream source.
 */
1645 #if __PROC_INTERNAL_DEBUG1 
1646         if (oldparent
->p_childrencnt 
< 0) 
1647                 panic("process children count -ve\n"); 
1649         LIST_INSERT_HEAD(&parent
->p_children
, child
, p_sibling
); 
1650         parent
->p_childrencnt
++;         
1651         child
->p_pptr 
= parent
; 
1652         child
->p_ppid 
= parent
->p_pid
; 
/* Prod launchd to reap a zombie handed to it. */
1656         if ((cansignal 
!= 0) && (initproc 
== parent
) && (child
->p_stat 
== SZOMB
)) 
1657                 psignal(initproc
, SIGCHLD
); 
1663  * Exit: deallocate address space and other resources, change proc state 
1664  * to zombie, and unlink proc from allproc and parent's lists.  Save exit 
1665  * status and rusage for wait().  Check for child processes and orphan them. 
1669 vfork_exit(proc_t p
, int rv
) 
1671         vfork_exit_internal(p
, rv
, 0); 
/*
 * vfork_exit_internal
 *
 * Exit path for a vfork()ed child that is still borrowing its parent's
 * address space: mark the proc as exiting, move it from allproc to
 * zombproc, neutralize tracing/signals, and tear down its resource-call
 * state.  'forceexit' skips the perf-server notification path.
 * NOTE(review): mangled extraction; code text left byte-identical.
 */
1675 vfork_exit_internal(proc_t p
, int rv
, int forceexit
) 
1677         thread_t self 
= current_thread(); 
1679         struct task 
*task 
= p
->task
; 
1684          * If a thread in this task has already 
1685          * called exit(), then halt any others 
1689          ut 
= get_bsdthread_info(self
); 
/* Someone already ran the exit path for this proc; don't do it twice. */
1693          if ((p
->p_lflag 
& P_LPEXIT
) == P_LPEXIT
) { 
1695                 * This happens when a parent exits/killed and vfork is in progress   
1696                 * other threads. But shutdown code for ex has already called exit1() 
1701         p
->p_lflag 
|= (P_LEXIT 
| P_LPEXIT
); 
1704         if (forceexit 
== 0) { 
1706                  * parent of a vfork child has already called exit() and the  
1707                  * thread that has vfork in progress terminates. So there is no 
1708                  * separate address space here and it has already been marked for 
1709                  * termination. This was never covered before and could cause problems 
1710                  * if we block here for outside code. 
1712                 /* Notify the perf server */ 
1713                 (void)sys_perf_notify(self
, p
->p_pid
); 
1717          * Remove proc from allproc queue and from pidhash chain. 
1718          * Need to do this before we do anything that can block. 
1719          * Not doing causes things like mount() find this on allproc 
1720          * in partially cleaned state. 
1725 #if CONFIG_MEMORYSTATUS 
1726         memorystatus_remove(p
, TRUE
); 
1729         LIST_REMOVE(p
, p_list
); 
1730         LIST_INSERT_HEAD(&zombproc
, p
, p_list
); /* Place onto zombproc. */ 
1731         /* will not be visible via proc_find */ 
1732         p
->p_listflag 
|= P_LIST_EXITED
; 
/* Stop tracing and ignore all further signals for the dying proc. */
1738         p
->p_lflag 
&= ~(P_LTRACED 
| P_LPPWAIT
); 
1739         p
->p_sigignore 
= ~0; 
/* Cancel and drain any outstanding resource-notify thread call. */
1743         if (thread_call_cancel(p
->p_rcall
)) 
1746         while (p
->p_ractive 
> 0) { 
1755         thread_call_free(p
->p_rcall
); 
/*
 * vproc_exit
 *
 * Second half of the vfork-child exit: release the controlling terminal
 * if session leader, reap/reparent children, gather final rusage from the
 * task, free per-proc substructures, and notify/wake the parent (or have
 * the system reap the zombie directly if the parent is dead).
 * NOTE(review): mangled extraction — statements split across lines and
 * interior source lines missing; code text left byte-identical.
 */
1764 vproc_exit(proc_t p
) 
1771         struct task 
*task 
= p
->task
; 
1774         struct session 
*sessp
; 
1775         struct rusage_superset 
*rup
; 
1777         /* XXX Zombie allocation may fail, in which case stats get lost */ 
1778         MALLOC_ZONE(rup
, struct rusage_superset 
*, 
1779                         sizeof (*rup
), M_ZOMBIE
, M_WAITOK
); 
1784          * Close open files and release open-file table. 
1789         sessp 
= proc_session(p
); 
1790         if (SESS_LEADER(p
, sessp
)) { 
1792                 if (sessp
->s_ttyvp 
!= NULLVP
) { 
1793                         struct vnode 
*ttyvp
; 
1796                         struct vfs_context context
; 
1800                          * Controlling process. 
1801                          * Signal foreground pgrp, 
1802                          * drain controlling terminal 
1803                          * and revoke access to controlling terminal. 
1805                         session_lock(sessp
); 
1806                         tp 
= SESSION_TP(sessp
); 
1807                         if ((tp 
!= TTY_NULL
) && (tp
->t_session 
== sessp
)) { 
1808                                 session_unlock(sessp
); 
1810                                 tty_pgsignal(tp
, SIGHUP
, 1); 
1812                                 session_lock(sessp
); 
1813                                 tp 
= SESSION_TP(sessp
); 
/* Detach the tty from the session under the session lock. */
1815                         cttyflag 
= sessp
->s_flags 
& S_CTTYREF
; 
1816                         sessp
->s_flags 
&= ~S_CTTYREF
; 
1817                         ttyvp 
= sessp
->s_ttyvp
; 
1818                         ttyvid 
= sessp
->s_ttyvid
; 
1819                         sessp
->s_ttyvp 
= NULL
; 
1820                         sessp
->s_ttyvid 
= 0; 
1821                         sessp
->s_ttyp 
= TTY_NULL
; 
1822                         sessp
->s_ttypgrpid 
= NO_PID
; 
1823                         session_unlock(sessp
); 
1825                        if ((ttyvp 
!= NULLVP
) && (vnode_getwithvid(ttyvp
, ttyvid
) == 0)) { 
1826                                 if (tp 
!= TTY_NULL
) { 
1831                                 context
.vc_thread 
= proc_thread(p
); /* XXX */ 
1832                                 context
.vc_ucred 
= kauth_cred_proc_ref(p
); 
1834                                 VNOP_REVOKE(ttyvp
, REVOKEALL
, &context
); 
1837                                          * Release the extra usecount taken in cttyopen. 
1838                                          * usecount should be released after VNOP_REVOKE is called. 
1843                                 kauth_cred_unref(&context
.vc_ucred
); 
1851                 session_lock(sessp
); 
1852                 sessp
->s_leader 
= NULL
; 
1853                 session_unlock(sessp
); 
1855         session_rele(sessp
); 
/* Lift the file-size limit so closing files below can't be blocked by it. */
1861         p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur 
= RLIM_INFINITY
; 
/* Walk all children: reap zombies, kill traced ones, orphan the rest. */
1864         proc_childdrainstart(p
); 
1865         while ((q 
= p
->p_children
.lh_first
) != NULL
) { 
1866                 if (q
->p_stat 
== SZOMB
) { 
1868                                 panic("parent child linkage broken"); 
1869                         /* check for lookups by zomb sysctl */ 
1870                         while ((q
->p_listflag 
& P_LIST_WAITING
) == P_LIST_WAITING
) { 
1871                                 msleep(&q
->p_stat
, proc_list_mlock
, PWAIT
, "waitcoll", 0); 
1873                         q
->p_listflag 
|= P_LIST_WAITING
; 
1875                          * This is a named reference and it is not granted 
1876                          * if the reap is already in progress. So we get 
1877                          * the reference here exclusively and there can be 
1878                          * no waiters. So there is no need for a wakeup 
1879                          * after we are done. Also the reap frees the structure 
1880                          * and the proc struct cannot be used for wakeups as well.  
1881                          * It is safe to use q here as this is system reap 
1883                         (void)reap_child_locked(p
, q
, 1, 0, 1, 0); 
1886                         * Traced processes are killed 
1887                         * since their existence means someone is messing up. 
1889                         if (q
->p_lflag 
& P_LTRACED
) { 
1894                                 opp 
= proc_find(q
->p_oppid
); 
1895                                 if (opp 
!= PROC_NULL
) { 
1899                                         proc_reparentlocked(q
, opp
, 0, 0); 
1902                                         /* original parent exited while traced */ 
1904                                         q
->p_listflag 
|= P_LIST_DEADPARENT
; 
1907                                         proc_reparentlocked(q
, initproc
, 0, 0); 
1911                                 q
->p_lflag 
&= ~P_LTRACED
; 
1913                                 if (q
->sigwait_thread
) { 
1914                                         thread_t thread 
= q
->sigwait_thread
; 
1918                                         * The sigwait_thread could be stopped at a 
1919                                         * breakpoint. Wake it up to kill. 
1920                                         * Need to do this as it could be a thread which is not 
1921                                         * the first thread in the task. So any attempts to kill 
1922                                         * the process would result into a deadlock on q->sigwait. 
1924                                         thread_resume(thread
); 
1925                                         clear_wait(thread
, THREAD_INTERRUPTED
); 
1926                                         threadsignal(thread
, SIGKILL
, 0); 
1931                                 psignal(q
, SIGKILL
); 
/* Not traced, not zombie: orphan the child to launchd. */
1934                                 q
->p_listflag 
|= P_LIST_DEADPARENT
; 
1935                                 proc_reparentlocked(q
, initproc
, 0, 1); 
1940         proc_childdrainend(p
); 
1944          * Release reference to text vnode 
1948         if (tvp 
!= NULLVP
) { 
1953          * Save exit status and final rusage info, adding in child rusage 
1954          * info and self times.  If we were unable to allocate a zombie 
1955          * structure, this information is lost. 
1958             rup
->ru 
= p
->p_stats
->p_ru
; 
1959             timerclear(&rup
->ru
.ru_utime
); 
1960             timerclear(&rup
->ru
.ru_stime
); 
/* Pull final user/system times for the task out of Mach task_info(). */
1964                 mach_task_basic_info_data_t tinfo
; 
1965                 task_thread_times_info_data_t ttimesinfo
; 
1966                 int task_info_stuff
, task_ttimes_stuff
; 
1967                 struct timeval ut
,st
; 
1969                 task_info_stuff 
= MACH_TASK_BASIC_INFO_COUNT
; 
1970                 task_info(task
, MACH_TASK_BASIC_INFO
, 
1971                           &tinfo
, &task_info_stuff
); 
1972                 p
->p_ru
->ru
.ru_utime
.tv_sec 
= tinfo
.user_time
.seconds
; 
1973                 p
->p_ru
->ru
.ru_utime
.tv_usec 
= tinfo
.user_time
.microseconds
; 
1974                 p
->p_ru
->ru
.ru_stime
.tv_sec 
= tinfo
.system_time
.seconds
; 
1975                 p
->p_ru
->ru
.ru_stime
.tv_usec 
= tinfo
.system_time
.microseconds
; 
1977                 task_ttimes_stuff 
= TASK_THREAD_TIMES_INFO_COUNT
; 
1978                 task_info(task
, TASK_THREAD_TIMES_INFO
, 
1979                           &ttimesinfo
, &task_ttimes_stuff
); 
1981                 ut
.tv_sec 
= ttimesinfo
.user_time
.seconds
; 
1982                 ut
.tv_usec 
= ttimesinfo
.user_time
.microseconds
; 
1983                 st
.tv_sec 
= ttimesinfo
.system_time
.seconds
; 
1984                 st
.tv_usec 
= ttimesinfo
.system_time
.microseconds
; 
/* Add live-thread times on top of the terminated-thread totals. */
1985                 timeradd(&ut
,&p
->p_ru
->ru
.ru_utime
,&p
->p_ru
->ru
.ru_utime
); 
1986                         timeradd(&st
,&p
->p_ru
->ru
.ru_stime
,&p
->p_ru
->ru
.ru_stime
); 
1990             ruadd(&rup
->ru
, &p
->p_stats
->p_cru
); 
1992                 gather_rusage_info_v2(p
, &rup
->ri
, RUSAGE_INFO_V2
); 
1993                 rup
->ri
.ri_phys_footprint 
= 0; 
1994                 rup
->ri
.ri_proc_exit_abstime 
= mach_absolute_time(); 
1997                  * Now that we have filled in the rusage info, make it 
1998                  * visible to an external observer via proc_pid_rusage(). 
2004          * Free up profiling buffers. 
2007                 struct uprof 
*p0 
= &p
->p_stats
->p_prof
, *p1
, *pn
; 
2013                 for (; p1 
!= NULL
; p1 
= pn
) { 
2015                         kfree(p1
, sizeof *p1
); 
2020         pth_proc_hashdelete(p
); 
2024          * Other substructures are freed from wait(). 
2026         FREE_ZONE(p
->p_stats
, sizeof *p
->p_stats
, M_PSTATS
); 
2029         FREE_ZONE(p
->p_sigacts
, sizeof *p
->p_sigacts
, M_SIGACTS
); 
2030         p
->p_sigacts 
= NULL
; 
2032         proc_limitdrop(p
, 1); 
2036          * Finish up by terminating the task 
2037          * and halt this thread (only if a 
2038          * member of the task exiting). 
2040         p
->task 
= TASK_NULL
; 
2043          * Notify parent that we're gone. 
2045         pp 
= proc_parent(p
); 
2046         if ((p
->p_listflag 
& P_LIST_DEADPARENT
) == 0) { 
2047                 if (pp 
!= initproc
) { 
2049                         pp
->si_pid 
= p
->p_pid
; 
2050                         pp
->si_status 
= p
->p_xstat
; 
2051                         pp
->si_code 
= CLD_EXITED
; 
2053                          * p_ucred usage is safe as it is an exiting process 
2054                          * and reference is dropped in reap 
2056                         pp
->si_uid 
= kauth_cred_getruid(p
->p_ucred
); 
2059                 /* mark as a zombie */ 
2061                 /* No need to take proc lock as all refs are drained and 
2062                  * no one except parent (reaping ) can look at this. 
2063                  * The write is to an int and is coherent. Also parent is 
2064                  *  keyed off of list lock for reaping 
2068                 psignal(pp
, SIGCHLD
); 
2070                 /* and now wakeup the parent */ 
2072                 wakeup((caddr_t
)pp
); 
/* Parent already dead: have the system reap the zombie directly. */
2076                 /* check for lookups by zomb sysctl */ 
2077                 while ((p
->p_listflag 
& P_LIST_WAITING
) == P_LIST_WAITING
) { 
2078                         msleep(&p
->p_stat
, proc_list_mlock
, PWAIT
, "waitcoll", 0); 
2081                 p
->p_listflag 
|= P_LIST_WAITING
; 
2084                  * This is a named reference and it is not granted 
2085                  * if the reap is already in progress. So we get 
2086                  * the reference here exclusively and there can be 
2087                  * no waiters. So there is no need for a wakeup 
2088                  * after we are done. Also the reap frees the structure 
2089                  * and the proc struct cannot be used for wakeups as well.  
2090                  * It is safe to use p here as this is system reap 
2092                 (void)reap_child_locked(pp
, p
, 0, 0, 1, 1); 
2093                 /* list lock dropped by reap_child_locked */ 
2101  *      LP64 support - long is 64 bits if we are dealing with a 64 bit user 
2102  *      process.  We munge the kernel version of rusage into the 
2105 __private_extern__  
void  
2106 munge_user64_rusage(struct rusage 
*a_rusage_p
, struct user64_rusage 
*a_user_rusage_p
) 
2108         /* timeval changes size, so utime and stime need special handling */ 
2109         a_user_rusage_p
->ru_utime
.tv_sec 
= a_rusage_p
->ru_utime
.tv_sec
; 
2110         a_user_rusage_p
->ru_utime
.tv_usec 
= a_rusage_p
->ru_utime
.tv_usec
; 
2111         a_user_rusage_p
->ru_stime
.tv_sec 
= a_rusage_p
->ru_stime
.tv_sec
; 
2112         a_user_rusage_p
->ru_stime
.tv_usec 
= a_rusage_p
->ru_stime
.tv_usec
; 
2114          * everything else can be a direct assign, since there is no loss 
2115          * of precision implied boing 32->64. 
2117         a_user_rusage_p
->ru_maxrss 
= a_rusage_p
->ru_maxrss
; 
2118         a_user_rusage_p
->ru_ixrss 
= a_rusage_p
->ru_ixrss
; 
2119         a_user_rusage_p
->ru_idrss 
= a_rusage_p
->ru_idrss
; 
2120         a_user_rusage_p
->ru_isrss 
= a_rusage_p
->ru_isrss
; 
2121         a_user_rusage_p
->ru_minflt 
= a_rusage_p
->ru_minflt
; 
2122         a_user_rusage_p
->ru_majflt 
= a_rusage_p
->ru_majflt
; 
2123         a_user_rusage_p
->ru_nswap 
= a_rusage_p
->ru_nswap
; 
2124         a_user_rusage_p
->ru_inblock 
= a_rusage_p
->ru_inblock
; 
2125         a_user_rusage_p
->ru_oublock 
= a_rusage_p
->ru_oublock
; 
2126         a_user_rusage_p
->ru_msgsnd 
= a_rusage_p
->ru_msgsnd
; 
2127         a_user_rusage_p
->ru_msgrcv 
= a_rusage_p
->ru_msgrcv
; 
2128         a_user_rusage_p
->ru_nsignals 
= a_rusage_p
->ru_nsignals
; 
2129         a_user_rusage_p
->ru_nvcsw 
= a_rusage_p
->ru_nvcsw
; 
2130         a_user_rusage_p
->ru_nivcsw 
= a_rusage_p
->ru_nivcsw
; 
2133 /* For a 64-bit kernel and 32-bit userspace, munging may be needed */ 
2134 __private_extern__  
void  
2135 munge_user32_rusage(struct rusage 
*a_rusage_p
, struct user32_rusage 
*a_user_rusage_p
) 
2137         /* timeval changes size, so utime and stime need special handling */ 
2138         a_user_rusage_p
->ru_utime
.tv_sec 
= a_rusage_p
->ru_utime
.tv_sec
; 
2139         a_user_rusage_p
->ru_utime
.tv_usec 
= a_rusage_p
->ru_utime
.tv_usec
; 
2140         a_user_rusage_p
->ru_stime
.tv_sec 
= a_rusage_p
->ru_stime
.tv_sec
; 
2141         a_user_rusage_p
->ru_stime
.tv_usec 
= a_rusage_p
->ru_stime
.tv_usec
; 
2143          * everything else can be a direct assign. We currently ignore 
2144          * the loss of precision 
2146         a_user_rusage_p
->ru_maxrss 
= a_rusage_p
->ru_maxrss
; 
2147         a_user_rusage_p
->ru_ixrss 
= a_rusage_p
->ru_ixrss
; 
2148         a_user_rusage_p
->ru_idrss 
= a_rusage_p
->ru_idrss
; 
2149         a_user_rusage_p
->ru_isrss 
= a_rusage_p
->ru_isrss
; 
2150         a_user_rusage_p
->ru_minflt 
= a_rusage_p
->ru_minflt
; 
2151         a_user_rusage_p
->ru_majflt 
= a_rusage_p
->ru_majflt
; 
2152         a_user_rusage_p
->ru_nswap 
= a_rusage_p
->ru_nswap
; 
2153         a_user_rusage_p
->ru_inblock 
= a_rusage_p
->ru_inblock
; 
2154         a_user_rusage_p
->ru_oublock 
= a_rusage_p
->ru_oublock
; 
2155         a_user_rusage_p
->ru_msgsnd 
= a_rusage_p
->ru_msgsnd
; 
2156         a_user_rusage_p
->ru_msgrcv 
= a_rusage_p
->ru_msgrcv
; 
2157         a_user_rusage_p
->ru_nsignals 
= a_rusage_p
->ru_nsignals
; 
2158         a_user_rusage_p
->ru_nvcsw 
= a_rusage_p
->ru_nvcsw
; 
2159         a_user_rusage_p
->ru_nivcsw 
= a_rusage_p
->ru_nivcsw
;