[apple/xnu.git] / bsd / kern / kern_exit.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77
78 #include "compat_43.h"
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/ioctl.h>
83 #include <sys/proc_internal.h>
84 #include <sys/proc.h>
85 #include <sys/kauth.h>
86 #include <sys/tty.h>
87 #include <sys/time.h>
88 #include <sys/resource.h>
89 #include <sys/kernel.h>
90 #include <sys/wait.h>
91 #include <sys/file_internal.h>
92 #include <sys/vnode_internal.h>
93 #include <sys/syslog.h>
94 #include <sys/malloc.h>
95 #include <sys/resourcevar.h>
96 #include <sys/ptrace.h>
97 #include <sys/user.h>
98 #include <sys/aio_kern.h>
99 #include <sys/sysproto.h>
100 #include <sys/signalvar.h>
101 #include <sys/kdebug.h>
102 #include <sys/filedesc.h> /* fdfree */
103 #if SYSV_SHM
104 #include <sys/shm_internal.h> /* shmexit */
105 #endif
106 #include <sys/acct.h> /* acct_process */
107
108 #include <security/audit/audit.h>
109 #include <bsm/audit_kevents.h>
110
111 #include <mach/mach_types.h>
112
113 #include <kern/kern_types.h>
114 #include <kern/kalloc.h>
115 #include <kern/task.h>
116 #include <kern/thread.h>
117 #include <kern/thread_call.h>
118 #include <kern/sched_prim.h>
119 #include <kern/assert.h>
120 #include <sys/codesign.h>
121
122 #if VM_PRESSURE_EVENTS
123 #include <kern/vm_pressure.h>
124 #endif
125
126 #if CONFIG_MEMORYSTATUS
127 #include <sys/kern_memorystatus.h>
128 #endif
129
130 #if CONFIG_DTRACE
131 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
132 extern void (*dtrace_fasttrap_exit_ptr)(proc_t);
133 extern void (*dtrace_helpers_cleanup)(proc_t);
134 extern void dtrace_lazy_dofs_destroy(proc_t);
135
136 #include <sys/dtrace_ptss.h>
137 #endif
138
139 #if CONFIG_MACF
140 #include <security/mac.h>
141 #include <sys/syscall.h>
142 #endif
143
144 #include <mach/mach_types.h>
145 #include <mach/task.h>
146 #include <mach/thread_act.h>
147
148 #include <vm/vm_protos.h>
149
150 #include <sys/sdt.h>
151
152 extern boolean_t init_task_died;
153 extern char init_task_failure_data[];
154 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
155 void vfork_exit(proc_t p, int rv);
156 void vproc_exit(proc_t p);
157 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
158 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
159 static int reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock);
160
161 /*
162 * Things which should have prototypes in headers, but don't
163 */
164 void proc_exit(proc_t p);
165 int wait1continue(int result);
166 int waitidcontinue(int result);
167 kern_return_t sys_perf_notify(thread_t thread, int pid);
168 kern_return_t task_exception_notify(exception_type_t exception,
169 mach_exception_data_type_t code, mach_exception_data_type_t subcode);
170 void delay(int);
171 void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor);
172
173 /*
174 * NOTE: Source and target may *NOT* overlap!
175 * XXX Should share code with bsd/dev/ppc/unix_signal.c
176 */
177 void
178 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
179 {
180 out->si_signo = in->si_signo;
181 out->si_errno = in->si_errno;
182 out->si_code = in->si_code;
183 out->si_pid = in->si_pid;
184 out->si_uid = in->si_uid;
185 out->si_status = in->si_status;
186 out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr);
187 /* following cast works for sival_int because of padding */
188 out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr);
189 out->si_band = in->si_band; /* range reduction */
190 }
191
192 void
193 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
194 {
195 out->si_signo = in->si_signo;
196 out->si_errno = in->si_errno;
197 out->si_code = in->si_code;
198 out->si_pid = in->si_pid;
199 out->si_uid = in->si_uid;
200 out->si_status = in->si_status;
201 out->si_addr = in->si_addr;
202 /* following cast works for sival_int because of padding */
203 out->si_value.sival_ptr = in->si_value.sival_ptr;
204 out->si_band = in->si_band; /* range reduction */
205 }
206
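/*
 * Zero-fill a user32/user64 siginfo, translate the native record to the
 * caller's ABI, and copy it out to uaddr.
 */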
207 static int
208 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
209 {
210 if (is64) {
211 user64_siginfo_t sinfo64;
212
213 bzero(&sinfo64, sizeof (sinfo64));
214 siginfo_user_to_user64(native, &sinfo64);
215 return (copyout(&sinfo64, uaddr, sizeof (sinfo64)));
216 } else {
217 user32_siginfo_t sinfo32;
218
219 bzero(&sinfo32, sizeof (sinfo32));
220 siginfo_user_to_user32(native, &sinfo32);
221 return (copyout(&sinfo32, uaddr, sizeof (sinfo32)));
222 }
223 }
224
225 /*
226 * exit --
227 * Death of process.
228 */
229 void
230 exit(proc_t p, struct exit_args *uap, int *retval)
231 {
232 exit1(p, W_EXITCODE(uap->rval, 0), retval);
233
234 thread_exception_return();
235 /* NOTREACHED */
236 while (TRUE)
237 thread_block(THREAD_CONTINUE_NULL);
238 /* NOTREACHED */
239 }
240
241 /*
242 * Exit: deallocate address space and other resources, change proc state
243 * to zombie, and unlink proc from allproc and parent's lists. Save exit
244 * status and rusage for wait(). Check for child processes and orphan them.
245 */
246 int
247 exit1(proc_t p, int rv, int *retval)
248 {
249 return exit1_internal(p, rv, retval, TRUE, TRUE, 0);
250 }
251
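/*
 * Common exit path. thread_can_terminate is FALSE for externally driven
 * exits (e.g. jetsam), where the calling thread must be able to return
 * rather than be terminated; perf_notify controls the sys_perf_notify()
 * callout in proc_prepareexit(); jetsam_flags are ORed into p_lflag once
 * the process is committed to exiting.
 */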
252 int
253 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
254 int jetsam_flags)
255 {
256 thread_t self = current_thread();
257 struct task *task = p->task;
258 struct uthread *ut;
259 int error = 0;
260
261 /*
262 * If a thread in this task has already
263 * called exit(), then halt any others
264 * right here.
265 */
266
267 ut = get_bsdthread_info(self);
268 if (ut->uu_flag & UT_VFORK) {
269 if (!thread_can_terminate) {
270 return EINVAL;
271 }
272
273 vfork_exit(p, rv);
274 vfork_return(p , retval, p->p_pid);
275 unix_syscall_return(0);
276 /* NOT REACHED */
277 }
278
279 /*
280 * The parameter list of audit_syscall_exit() was augmented to
281 * take the Darwin syscall number as the first parameter,
282 * which is currently required by mac_audit_postselect().
283 */
284
285 /*
286 * The BSM token contains two components: an exit status as passed
287 * to exit(), and a return value to indicate what sort of exit it
288 * was. The exit status is WEXITSTATUS(rv), but it's not clear
289 * what the return value is.
290 */
291 AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
292 AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
293
294 DTRACE_PROC1(exit, int, CLD_EXITED);
295
296 /* Mark the process as exiting and pull it out of DBG/disk throttling */
297 /* TODO: This should be done after becoming the exit thread */
298 proc_set_task_policy(p->task, THREAD_NULL, TASK_POLICY_ATTRIBUTE,
299 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
300
301 proc_lock(p);
302 error = proc_transstart(p, 1, ((jetsam_flags & P_JETSAM_VNODE) ? 1 : 0));
303 if (error == EDEADLK) {
304 /* Temp: A deadlock error implies that a multithreaded exec is
305 * in progress. Instead of letting exit continue and
306 * corrupt the freed memory, let the exit thread
307 * return. This avoids the corruption in the rare case.
308 */
309 proc_unlock(p);
310 if (current_proc() == p){
311 if (p->exit_thread == self)
312 printf("exit_thread failed to exit, leaving process %s[%d] in unkillable limbo\n",
313 p->p_comm, p->p_pid);
314 thread_exception_return();
315 } else {
316 /* external termination like jetsam */
317 return(error);
318 }
319 }
320
321 while (p->exit_thread != self) {
322 if (sig_try_locked(p) <= 0) {
323 proc_transend(p, 1);
324 if (get_threadtask(self) != task) {
325 proc_unlock(p);
326 return(0);
327 }
328 proc_unlock(p);
329
330 thread_terminate(self);
331 if (!thread_can_terminate) {
332 return 0;
333 }
334
335 thread_exception_return();
336 /* NOTREACHED */
337 }
338 sig_lock_to_exit(p);
339 }
340 if (p == initproc && current_proc() == p) {
341 proc_unlock(p);
342 printf("pid 1 exited (signal %d, exit %d)",
343 WTERMSIG(rv), WEXITSTATUS(rv));
344 #if (DEVELOPMENT || DEBUG)
345 int err;
346 /*
347 * For debugging purposes, generate a core file of initproc before
348 * panicking. Leave at least 300 MB free on the root volume, and ignore
349 * the process's corefile ulimit.
350 */
351 if ((err = coredump(p, 300, 1)) != 0) {
352 printf("Failed to generate initproc core file: error %d", err);
353 } else {
354 printf("Generated initproc core file");
355 sync(p, (void *)NULL, (int *)NULL);
356 }
357 #endif
358 init_task_died = TRUE;
359 panic("%s died\nState at Last Exception:\n\n%s",
360 (p->p_comm[0] != '\0' ?
361 p->p_comm :
362 "launchd"),
363 init_task_failure_data);
364 }
365
366 p->p_lflag |= P_LEXIT;
367 p->p_xstat = rv;
368 p->p_lflag |= jetsam_flags;
369
370 proc_transend(p, 1);
371 proc_unlock(p);
372
373 proc_prepareexit(p, rv, perf_notify);
374
375 /* Last thread to terminate will call proc_exit() */
376 task_terminate_internal(task);
377
378 return(0);
379 }
380
381 void
382 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
383 {
384 mach_exception_data_type_t code, subcode;
385 struct uthread *ut;
386 thread_t self = current_thread();
387 ut = get_bsdthread_info(self);
388 struct rusage_superset *rup;
389
390 /* If a core should be generated, notify crash reporter */
391 if (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0)) {
392 /*
393 * Workaround for processes checking up on PT_DENY_ATTACH:
394 * should be backed out post-Leopard (details in 5431025).
395 */
396 if ((SIGSEGV == WTERMSIG(rv)) &&
397 (p->p_pptr->p_lflag & P_LNOATTACH)) {
398 goto skipcheck;
399 }
400
401 /*
402 * Crash Reporter looks for the signal value, original exception
403 * type, and low 20 bits of the original code in code[0]
404 * (8, 4, and 20 bits respectively). code[1] is unmodified.
405 */
406 code = ((WTERMSIG(rv) & 0xff) << 24) |
407 ((ut->uu_exception & 0x0f) << 20) |
408 ((int)ut->uu_code & 0xfffff);
409 subcode = ut->uu_subcode;
410 (void) task_exception_notify(EXC_CRASH, code, subcode);
411 }
412
413 skipcheck:
414 /* Notify the perf server? */
415 if (perf_notify) {
416 (void)sys_perf_notify(self, p->p_pid);
417 }
418
419 /*
420 * Before this process becomes a zombie, stash resource usage
421 * stats in the proc for external observers to query
422 * via proc_pid_rusage().
423 *
424 * If the zombie allocation fails, just punt the stats.
425 */
426 MALLOC_ZONE(rup, struct rusage_superset *,
427 sizeof (*rup), M_ZOMBIE, M_WAITOK);
428 if (rup != NULL) {
429 gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
430 rup->ri.ri_phys_footprint = 0;
431 rup->ri.ri_proc_exit_abstime = mach_absolute_time();
432
433 /*
434 * Make the rusage_info visible to external observers
435 * only after it has been completely filled in.
436 */
437 p->p_ru = rup;
438 }
439
440 /*
441 * Remove proc from allproc queue and from pidhash chain.
442 * Need to do this before we do anything that can block.
443 * Not doing so causes things like mount() to find this proc on
444 * allproc in a partially cleaned state.
445 */
446
447 proc_list_lock();
448
449 #if CONFIG_MEMORYSTATUS
450 memorystatus_remove(p, TRUE);
451 #endif
452
453 LIST_REMOVE(p, p_list);
454 LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
455 /* will not be visible via proc_find */
456 p->p_listflag |= P_LIST_EXITED;
457
458 proc_list_unlock();
459
460
461 #ifdef PGINPROF
462 vmsizmon();
463 #endif
464 /*
465 * If parent is waiting for us to exit or exec,
466 * P_LPPWAIT is set; we will wakeup the parent below.
467 */
468 proc_lock(p);
469 p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
470 p->p_sigignore = ~(sigcantmask);
471 ut->uu_siglist = 0;
472 proc_unlock(p);
473 }
474
475 void
476 proc_exit(proc_t p)
477 {
478 proc_t q;
479 proc_t pp;
480 struct task *task = p->task;
481 vnode_t tvp = NULLVP;
482 struct pgrp * pg;
483 struct session *sessp;
484 struct uthread * uth;
485 pid_t pid;
486 int exitval;
487 int knote_hint;
488
489 uth = current_uthread();
490
491 proc_lock(p);
492 proc_transstart(p, 1, 0);
493 if( !(p->p_lflag & P_LEXIT)) {
494 /*
495 * This can happen if a thread_terminate() occurs
496 * in a single-threaded process.
497 */
498 p->p_lflag |= P_LEXIT;
499 proc_transend(p, 1);
500 proc_unlock(p);
501 proc_prepareexit(p, 0, TRUE);
502 (void) task_terminate_internal(task);
503 proc_lock(p);
504 } else {
505 proc_transend(p, 1);
506 }
507
508 p->p_lflag |= P_LPEXIT;
509
510 /*
511 * Other kernel threads may be in the middle of signalling this process.
512 * Wait for those threads to wrap it up before making the process
513 * disappear on them.
514 */
515 if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
516 p->p_sigwaitcnt++;
517 while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1))
518 msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
519 p->p_sigwaitcnt--;
520 }
521
522 proc_unlock(p);
523 pid = p->p_pid;
524 exitval = p->p_xstat;
525 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
526 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
527 pid, exitval, 0, 0, 0);
528
529 #if CONFIG_DTRACE
530 /*
531 * Free any outstanding lazy dof entries. It is imperative we
532 * always call dtrace_lazy_dofs_destroy, rather than null check
533 * and call if !NULL. If we NULL test, during lazy dof faulting
534 * we can race with the faulting code and proceed from here to
535 * beyond the helpers cleanup. The lazy dof faulting will then
536 * install new helpers which will never be cleaned up, and leak.
537 */
538 dtrace_lazy_dofs_destroy(p);
539
540 /*
541 * Clean up any DTrace helper actions or probes for the process.
542 */
543 if (p->p_dtrace_helpers != NULL) {
544 (*dtrace_helpers_cleanup)(p);
545 }
546
547 /*
548 * Clean up any DTrace probes associated with this process.
549 */
550 /*
551 * APPLE NOTE: We release ptss pages/entries in dtrace_fasttrap_exit_ptr(),
552 * call this after dtrace_helpers_cleanup()
553 */
554 proc_lock(p);
555 if (p->p_dtrace_probes && dtrace_fasttrap_exit_ptr) {
556 (*dtrace_fasttrap_exit_ptr)(p);
557 }
558 proc_unlock(p);
559 #endif
560
561 nspace_proc_exit(p);
562
563 #if VM_PRESSURE_EVENTS
564 vm_pressure_proc_cleanup(p);
565 #endif
566
567 /*
568 * need to cancel async IO requests that can be cancelled and wait for those
569 * already active. MAY BLOCK!
570 */
571
572 proc_refdrain(p);
573
574 /* if any pending cpu limits action, clear it */
575 task_clear_cpuusage(p->task, TRUE);
576
577 workqueue_mark_exiting(p);
578 workqueue_exit(p);
579
580 _aio_exit( p );
581
582 /*
583 * Close open files and release open-file table.
584 * This may block!
585 */
586 fdfree(p);
587
588 if (uth->uu_lowpri_window) {
589 /*
590 * task is marked as a low priority I/O type
591 * and the I/O we issued while flushing files on close
592 * collided with normal I/O operations...
593 * no need to throttle this thread since it's going away,
594 * but we do need to update our bookkeeping w.r.t. throttled threads
595 */
596 throttle_lowpri_io(0);
597 }
598
599 #if SYSV_SHM
600 /* Close ref SYSV Shared memory */
601 if (p->vm_shm)
602 shmexit(p);
603 #endif
604 #if SYSV_SEM
605 /* Release SYSV semaphores */
606 semexit(p);
607 #endif
608
609 #if PSYNCH
610 pth_proc_hashdelete(p);
611 #endif /* PSYNCH */
612
613 sessp = proc_session(p);
614 if (SESS_LEADER(p, sessp)) {
615
616 if (sessp->s_ttyvp != NULLVP) {
617 struct vnode *ttyvp;
618 int ttyvid;
619 int cttyflag = 0;
620 struct vfs_context context;
621 struct tty *tp;
622
623 /*
624 * Controlling process.
625 * Signal foreground pgrp,
626 * drain controlling terminal
627 * and revoke access to controlling terminal.
628 */
629 session_lock(sessp);
630 tp = SESSION_TP(sessp);
631 if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
632 session_unlock(sessp);
633
634 /*
635 * We're going to SIGHUP the foreground process
636 * group. It can't change from this point on
637 * until the revoke is complete.
638 * The process group changes under both the tty
639 * lock and proc_list_lock, but we need only one of them.
640 */
641 tty_lock(tp);
642 ttysetpgrphup(tp);
643 tty_unlock(tp);
644
645 tty_pgsignal(tp, SIGHUP, 1);
646
647 session_lock(sessp);
648 tp = SESSION_TP(sessp);
649 }
650 cttyflag = sessp->s_flags & S_CTTYREF;
651 sessp->s_flags &= ~S_CTTYREF;
652 ttyvp = sessp->s_ttyvp;
653 ttyvid = sessp->s_ttyvid;
654 sessp->s_ttyvp = NULLVP;
655 sessp->s_ttyvid = 0;
656 sessp->s_ttyp = TTY_NULL;
657 sessp->s_ttypgrpid = NO_PID;
658 session_unlock(sessp);
659
660 if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
661 if (tp != TTY_NULL) {
662 tty_lock(tp);
663 (void) ttywait(tp);
664 tty_unlock(tp);
665 }
666 context.vc_thread = proc_thread(p); /* XXX */
667 context.vc_ucred = kauth_cred_proc_ref(p);
668 VNOP_REVOKE(ttyvp, REVOKEALL, &context);
669 if (cttyflag) {
670 /*
671 * Release the extra usecount taken in cttyopen.
672 * usecount should be released after VNOP_REVOKE is called.
673 * This usecount was taken to ensure that
674 * the VNOP_REVOKE results in a close to
675 * the tty since cttyclose is a no-op.
676 */
677 vnode_rele(ttyvp);
678 }
679 vnode_put(ttyvp);
680 kauth_cred_unref(&context.vc_ucred);
681 ttyvp = NULLVP;
682 }
683 if (tp) {
684 /*
685 * This is cleared even if not set. This is also done in
686 * spec_close to ensure that the flag is cleared.
687 */
688 tty_lock(tp);
689 ttyclrpgrphup(tp);
690 tty_unlock(tp);
691
692 ttyfree(tp);
693 }
694 }
695 session_lock(sessp);
696 sessp->s_leader = NULL;
697 session_unlock(sessp);
698 }
699 session_rele(sessp);
700
701 pg = proc_pgrp(p);
702 fixjobc(p, pg, 0);
703 pg_rele(pg);
704
705 p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
706 (void)acct_process(p);
707
708 proc_list_lock();
709
710 if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
711 p->p_listflag &= ~P_LIST_EXITCOUNT;
712 proc_shutdown_exitcount--;
713 if (proc_shutdown_exitcount == 0)
714 wakeup(&proc_shutdown_exitcount);
715 }
716
717 /* wait till parentrefs are dropped and grant no more */
718 proc_childdrainstart(p);
719 while ((q = p->p_children.lh_first) != NULL) {
720 int reparentedtoinit = (q->p_listflag & P_LIST_DEADPARENT) ? 1 : 0;
721 if (q->p_stat == SZOMB) {
722 if (p != q->p_pptr)
723 panic("parent child linkage broken");
724 /* check for sysctl zomb lookup */
725 while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
726 msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
727 }
728 q->p_listflag |= P_LIST_WAITING;
729 /*
730 * This is a named reference and it is not granted
731 * if the reap is already in progress. So we get
732 * the reference here exclusively and there can be
733 * no waiters, so there is no need for a wakeup
734 * after we are done. Also, the reap frees the structure,
735 * so the proc struct cannot be used for wakeups either.
736 * It is safe to use q here as this is a system reap.
737 */
738 (void)reap_child_locked(p, q, 1, reparentedtoinit, 1, 0);
739 } else {
740 /*
741 * Traced processes are killed
742 * since their existence means someone is messing up.
743 */
744 if (q->p_lflag & P_LTRACED) {
745 struct proc *opp;
746
747 /*
748 * Take a reference on the child process to
749 * ensure it doesn't exit and disappear between
750 * the time we drop the list_lock and attempt
751 * to acquire its proc_lock.
752 */
753 if (proc_ref_locked(q) != q)
754 continue;
755
756 proc_list_unlock();
757
758 opp = proc_find(q->p_oppid);
759 if (opp != PROC_NULL) {
760 proc_list_lock();
761 q->p_oppid = 0;
762 proc_list_unlock();
763 proc_reparentlocked(q, opp, 0, 0);
764 proc_rele(opp);
765 } else {
766 /* original parent exited while traced */
767 proc_list_lock();
768 q->p_listflag |= P_LIST_DEADPARENT;
769 q->p_oppid = 0;
770 proc_list_unlock();
771 proc_reparentlocked(q, initproc, 0, 0);
772 }
773
774 proc_lock(q);
775 q->p_lflag &= ~P_LTRACED;
776
777 if (q->sigwait_thread) {
778 thread_t thread = q->sigwait_thread;
779
780 proc_unlock(q);
781 /*
782 * The sigwait_thread could be stopped at a
783 * breakpoint. Wake it up to kill.
784 * Need to do this as it could be a thread which is not
785 * the first thread in the task. So any attempts to kill
786 * the process would result in a deadlock on q->sigwait.
787 */
788 thread_resume(thread);
789 clear_wait(thread, THREAD_INTERRUPTED);
790 threadsignal(thread, SIGKILL, 0);
791 } else {
792 proc_unlock(q);
793 }
794
795 psignal(q, SIGKILL);
796 proc_list_lock();
797 proc_rele_locked(q);
798 } else {
799 q->p_listflag |= P_LIST_DEADPARENT;
800 proc_reparentlocked(q, initproc, 0, 1);
801 }
802 }
803 }
804
805 proc_childdrainend(p);
806 proc_list_unlock();
807
808 /*
809 * Release reference to text vnode
810 */
811 tvp = p->p_textvp;
812 p->p_textvp = NULL;
813 if (tvp != NULLVP) {
814 vnode_rele(tvp);
815 }
816
817 /*
818 * Save exit status and final rusage info, adding in child rusage
819 * info and self times. If we were unable to allocate a zombie
820 * structure, this information is lost.
821 */
822 if (p->p_ru != NULL) {
823 calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
824 p->p_ru->ru = p->p_stats->p_ru;
825
826 ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
827 }
828
829 /*
830 * Free up profiling buffers.
831 */
832 {
833 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
834
835 p1 = p0->pr_next;
836 p0->pr_next = NULL;
837 p0->pr_scale = 0;
838
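/* With pr_scale cleared, no new samples can land; free the chained buffers. */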
839 for (; p1 != NULL; p1 = pn) {
840 pn = p1->pr_next;
841 kfree(p1, sizeof *p1);
842 }
843 }
844
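/*
 * Cancel the p_rcall thread call if it is pending; p_ractive counts
 * in-flight invocations, so spin until they drain before freeing the
 * call below.
 */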
845 proc_spinlock(p);
846 if (thread_call_cancel(p->p_rcall))
847 p->p_ractive--;
848
849 while (p->p_ractive > 0) {
850 proc_spinunlock(p);
851
852 delay(1);
853
854 proc_spinlock(p);
855 }
856 proc_spinunlock(p);
857
858 thread_call_free(p->p_rcall);
859 p->p_rcall = NULL;
860
861 /*
862 * Other substructures are freed from wait().
863 */
864 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
865 p->p_stats = NULL;
866
867 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
868 p->p_sigacts = NULL;
869
870 proc_limitdrop(p, 1);
871 p->p_limit = NULL;
872
873 vm_purgeable_disown(p->task);
874
875 /*
876 * Finish up by terminating the task
877 * and halting this thread (only if it is a
878 * member of the exiting task).
879 */
880 p->task = TASK_NULL;
881 set_bsdtask_info(task, NULL);
882
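/* Fold the 16-bit exit status into the NOTE_EXIT hint for EVFILT_PROC watchers. */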
883 knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
884 proc_knote(p, knote_hint);
885
886 /* mark the thread as the one that is doing proc_exit
887 * no need to hold proc lock in uthread_free
888 */
889 uth->uu_flag |= UT_PROCEXIT;
890 /*
891 * Notify parent that we're gone.
892 */
893 pp = proc_parent(p);
894 if (pp->p_flag & P_NOCLDWAIT) {
895
896 if (p->p_ru != NULL) {
897 proc_lock(pp);
898 #if 3839178
899 /*
900 * If the parent is ignoring SIGCHLD, then POSIX requires
901 * us to not add the resource usage to the parent process -
902 * we are only going to hand it off to init to get reaped.
903 * We should contest the standard in this case on the basis
904 * of RLIMIT_CPU.
905 */
906 #else /* !3839178 */
907 /*
908 * Add child resource usage to parent before giving
909 * zombie to init. If we were unable to allocate a
910 * zombie structure, this information is lost.
911 */
912 ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
913 #endif /* !3839178 */
914 update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
915 proc_unlock(pp);
916 }
917
918 /* kernel can reap this one, no need to move it to launchd */
919 proc_list_lock();
920 p->p_listflag |= P_LIST_DEADPARENT;
921 proc_list_unlock();
922 }
923 if ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid) {
924 if (pp != initproc) {
925 proc_lock(pp);
926 pp->si_pid = p->p_pid;
927 pp->si_status = p->p_xstat;
928 pp->si_code = CLD_EXITED;
929 /*
930 * p_ucred usage is safe as it is an exiting process
931 * and the reference is dropped in reap
932 */
933 pp->si_uid = kauth_cred_getruid(p->p_ucred);
934 proc_unlock(pp);
935 }
936 /* mark as a zombie */
937 /* No need to take proc lock as all refs are drained and
938 * no one except the parent (reaping) can look at this.
939 * The write is to an int and is coherent. Also, the parent is
940 * keyed off of the list lock for reaping.
941 */
942 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
943 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
944 pid, exitval, 0, 0, 0);
945 p->p_stat = SZOMB;
946 /*
947 * The current process can now be reaped, so no one
948 * can depend on it.
949 */
950
951 psignal(pp, SIGCHLD);
952
953 /* and now wakeup the parent */
954 proc_list_lock();
955 wakeup((caddr_t)pp);
956 proc_list_unlock();
957 } else {
958 /* should be fine as parent proc would be initproc */
959 /* mark as a zombie */
960 /* No need to take proc lock as all refs are drained and
961 * no one except the parent (reaping) can look at this.
962 * The write is to an int and is coherent. Also, the parent is
963 * keyed off of the list lock for reaping.
964 */
965 proc_list_lock();
966 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
967 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
968 pid, exitval, 0, 0, 0);
969 /* check for sysctl zomb lookup */
970 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
971 msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
972 }
973 /* safe to use p as this is a system reap */
974 p->p_stat = SZOMB;
975 p->p_listflag |= P_LIST_WAITING;
976
977 /*
978 * This is a named reference and it is not granted
979 * if the reap is already in progress. So we get
980 * the reference here exclusively and there can be
981 * no waiters, so there is no need for a wakeup
982 * after we are done. Also, the reap frees the structure,
983 * so the proc struct cannot be used for wakeups either.
984 * It is safe to use p here as this is a system reap.
985 */
986 (void)reap_child_locked(pp, p, 1, 0, 1, 1);
987 /* list lock dropped by reap_child_locked */
988 }
989 if (uth->uu_lowpri_window) {
990 /*
991 * task is marked as a low priority I/O type and we've
992 * somehow picked up another throttle during exit processing...
993 * no need to throttle this thread since it's going away,
994 * but we do need to update our bookkeeping w.r.t. throttled threads
995 */
996 throttle_lowpri_io(0);
997 }
998
999 proc_rele(pp);
1000
1001 }
1002
1003
1004 /*
1005 * reap_child_locked
1006 *
1007 * Description: Given a process from which all status information needed
1008 * has already been extracted, if the process is a ptrace
1009 * attach process, detach it and give it back to its real
1010 * parent, else recover all resources remaining associated
1011 * with it.
1012 *
1013 * Parameters: proc_t parent Parent of process being reaped
1014 * proc_t child Process to reap
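 * int deadparent Nonzero when the reap is driven by exit()
 * rather than by a wait*() caller
 * int reparentedtoinit Child had been reparented to initproc
 * before being ptraced (radar 5677288)
 * int locked Caller already holds proc_list_lock
 * int droplock Leave proc_list_lock dropped on return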
1015 *
1016 * Returns: 0 Process was not reaped because it
1017 * came from an attach
1018 * 1 Process was reaped
1019 */
1020 static int
1021 reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock)
1022 {
1023 proc_t trace_parent = PROC_NULL; /* Traced parent process, if tracing */
1024
1025 if (locked == 1)
1026 proc_list_unlock();
1027
1028 /*
1029 * If we got the child via a ptrace 'attach',
1030 * we need to give it back to the old parent.
1031 *
1032 * Exception: someone who has been reparented to launchd before being
1033 * ptraced can simply be reaped, refer to radar 5677288
1034 * p_oppid -> ptraced
1035 * trace_parent == initproc -> away from launchd
1036 * reparentedtoinit -> came to launchd by reparenting
1037 */
1038 if (child->p_oppid) {
1039 int knote_hint;
1040 pid_t oppid;
1041
1042 proc_lock(child);
1043 oppid = child->p_oppid;
1044 child->p_oppid = 0;
1045 knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
1046 proc_unlock(child);
1047
1048 if ((trace_parent = proc_find(oppid))
1049 && !((trace_parent == initproc) && reparentedtoinit)) {
1050
1051 if (trace_parent != initproc) {
1052 /*
1053 * proc internal fields and p_ucred usage are safe
1054 * here as the child is dead and is not yet reaped
1055 * or reparented
1056 */
1057 proc_lock(trace_parent);
1058 trace_parent->si_pid = child->p_pid;
1059 trace_parent->si_status = child->p_xstat;
1060 trace_parent->si_code = CLD_CONTINUED;
1061 trace_parent->si_uid = kauth_cred_getruid(child->p_ucred);
1062 proc_unlock(trace_parent);
1063 }
1064 proc_reparentlocked(child, trace_parent, 1, 0);
1065
1066 /* resend knote to original parent (and others) after reparenting */
1067 proc_knote(child, knote_hint);
1068
1069 psignal(trace_parent, SIGCHLD);
1070 proc_list_lock();
1071 wakeup((caddr_t)trace_parent);
1072 child->p_listflag &= ~P_LIST_WAITING;
1073 wakeup(&child->p_stat);
1074 proc_list_unlock();
1075 proc_rele(trace_parent);
1076 if ((locked == 1) && (droplock == 0))
1077 proc_list_lock();
1078 return (0);
1079 }
1080
1081 /*
1082 * If we can't reparent (e.g. the original parent exited while child was being debugged, or
1083 * original parent is the same as the debugger currently exiting), we still need to satisfy
1084 * the knote lifecycle for other observers on the system. While the debugger was attached,
1085 * the NOTE_EXIT would not have been broadcast during initial child termination.
1086 */
1087 proc_knote(child, knote_hint);
1088
1089 if (trace_parent != PROC_NULL) {
1090 proc_rele(trace_parent);
1091 }
1092 }
1093
1094 #pragma clang diagnostic push
1095 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1096 proc_knote(child, NOTE_REAP);
1097 #pragma clang diagnostic pop
1098
1099 proc_knote_drain(child);
1100
1101 child->p_xstat = 0;
1102 if (child->p_ru) {
1103 proc_lock(parent);
1104 #if 3839178
1105 /*
1106 * If the parent is ignoring SIGCHLD, then POSIX requires
1107 * us to not add the resource usage to the parent process -
1108 * we are only going to hand it off to init to get reaped.
1109 * We should contest the standard in this case on the basis
1110 * of RLIMIT_CPU.
1111 */
1112 if (!(parent->p_flag & P_NOCLDWAIT))
1113 #endif /* 3839178 */
1114 ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
1115 update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
1116 proc_unlock(parent);
1117 FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
1118 child->p_ru = NULL;
1119 } else {
1120 printf("Warning : lost p_ru for %s\n", child->p_comm);
1121 }
1122
1123 AUDIT_SESSION_PROCEXIT(child);
1124
1125 /*
1126 * Decrement the count of procs running with this uid.
1127 * p_ucred usage is safe here as this is an exited process,
1128 * and the reference is dropped after these calls down below
1129 * (locking protection is provided by list lock held in chgproccnt)
1130 */
1131 (void)chgproccnt(kauth_cred_getruid(child->p_ucred), -1);
1132
1133 #if CONFIG_LCTX
1134 ALLLCTX_LOCK;
1135 leavelctx(child);
1136 ALLLCTX_UNLOCK;
1137 #endif
1138
1139 /*
1140 * Free up credentials.
1141 */
1142 if (IS_VALID_CRED(child->p_ucred)) {
1143 kauth_cred_unref(&child->p_ucred);
1144 }
1145
1146 /* XXXX Note NOT SAFE TO USE p_ucred from this point onwards */
1147
1148 /*
1149 * Finally finished with old proc entry.
1150 * Unlink it from its process group and free it.
1151 */
1152 leavepgrp(child);
1153
1154 proc_list_lock();
1155 LIST_REMOVE(child, p_list); /* off zombproc */
1156 parent->p_childrencnt--;
1157 LIST_REMOVE(child, p_sibling);
1158 /* If there are no more children wakeup parent */
1159 if ((deadparent != 0) && (LIST_EMPTY(&parent->p_children)))
1160 wakeup((caddr_t)parent); /* with list lock held */
1161 child->p_listflag &= ~P_LIST_WAITING;
1162 wakeup(&child->p_stat);
1163
1164 /* Take it out of process hash */
1165 LIST_REMOVE(child, p_hash);
1166 child->p_listflag &= ~P_LIST_INHASH;
1167 proc_checkdeadrefs(child);
1168 nprocs--;
1169
1170 if (deadparent) {
1171 /*
1172 * If a child zombie is being reaped because its parent
1173 * is exiting, make sure we update the list flag
1174 */
1175 child->p_listflag |= P_LIST_DEADPARENT;
1176 }
1177
1178 proc_list_unlock();
1179
1180 #if CONFIG_FINE_LOCK_GROUPS
1181 lck_mtx_destroy(&child->p_mlock, proc_mlock_grp);
1182 lck_mtx_destroy(&child->p_fdmlock, proc_fdmlock_grp);
1183 #if CONFIG_DTRACE
1184 lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp);
1185 #endif
1186 lck_spin_destroy(&child->p_slock, proc_slock_grp);
1187 #else /* CONFIG_FINE_LOCK_GROUPS */
1188 lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
1189 lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
1190 #if CONFIG_DTRACE
1191 lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp);
1192 #endif
1193 lck_spin_destroy(&child->p_slock, proc_lck_grp);
1194 #endif /* CONFIG_FINE_LOCK_GROUPS */
1195 workqueue_destroy_lock(child);
1196
1197 FREE_ZONE(child, sizeof *child, M_PROC);
1198 if ((locked == 1) && (droplock == 0))
1199 proc_list_lock();
1200
1201 return (1);
1202 }
1203
1204
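/*
 * Continuation for wait4(): the original stack is gone when msleep0()
 * resumes us, so re-fetch the saved args/retval from the uthread and
 * re-enter wait4_nocancel().
 */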
1205 int
1206 wait1continue(int result)
1207 {
1208 proc_t p;
1209 thread_t thread;
1210 uthread_t uth;
1211 struct _wait4_data *wait4_data;
1212 struct wait4_nocancel_args *uap;
1213 int *retval;
1214
1215 if (result)
1216 return(result);
1217
1218 p = current_proc();
1219 thread = current_thread();
1220 uth = (struct uthread *)get_bsdthread_info(thread);
1221
1222 wait4_data = &uth->uu_kevent.uu_wait4_data;
1223 uap = wait4_data->args;
1224 retval = wait4_data->retval;
1225 return(wait4_nocancel(p, uap, retval));
1226 }
1227
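/*
 * wait4 is a cancellation point: check for a pending cancellation, then
 * fall through to the non-cancellable variant.
 */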
1228 int
1229 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
1230 {
1231 __pthread_testcancel(1);
1232 return(wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval));
1233 }
1234
1235 int
1236 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
1237 {
1238 int nfound;
1239 int sibling_count;
1240 proc_t p;
1241 int status, error;
1242 uthread_t uth;
1243 struct _wait4_data *wait4_data;
1244
1245 AUDIT_ARG(pid, uap->pid);
1246
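/* pid == 0 means "any child in our process group"; encode it as -pgid. */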
1247 if (uap->pid == 0)
1248 uap->pid = -q->p_pgrpid;
1249
1250 loop:
1251 proc_list_lock();
1252 loop1:
1253 nfound = 0;
1254 sibling_count = 0;
1255
1256 for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
1257 if ( p->p_sibling.le_next != 0 )
1258 sibling_count++;
1259 if (uap->pid != WAIT_ANY &&
1260 p->p_pid != uap->pid &&
1261 p->p_pgrpid != -(uap->pid))
1262 continue;
1263
1264 nfound++;
1265
1266 /* XXX This is racy because we don't get the lock!!!! */
1267
1268 if (p->p_listflag & P_LIST_WAITING) {
1269 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1270 goto loop1;
1271 }
1272 p->p_listflag |= P_LIST_WAITING; /* only allow single thread to wait() */
1273
1274
1275 if (p->p_stat == SZOMB) {
1276 int reparentedtoinit = (p->p_listflag & P_LIST_DEADPARENT) ? 1 : 0;
1277
1278 proc_list_unlock();
1279 #if CONFIG_MACF
1280 if ((error = mac_proc_check_wait(q, p)) != 0)
1281 goto out;
1282 #endif
1283 retval[0] = p->p_pid;
1284 if (uap->status) {
1285 /* Legacy apps expect only 8 bits of status */
1286 status = 0xffff & p->p_xstat; /* convert to int */
1287 error = copyout((caddr_t)&status,
1288 uap->status,
1289 sizeof(status));
1290 if (error)
1291 goto out;
1292 }
1293 if (uap->rusage) {
1294 if (p->p_ru == NULL) {
1295 error = ENOMEM;
1296 } else {
1297 if (IS_64BIT_PROCESS(q)) {
1298 struct user64_rusage my_rusage;
1299 munge_user64_rusage(&p->p_ru->ru, &my_rusage);
1300 error = copyout((caddr_t)&my_rusage,
1301 uap->rusage,
1302 sizeof (my_rusage));
1303 }
1304 else {
1305 struct user32_rusage my_rusage;
1306 munge_user32_rusage(&p->p_ru->ru, &my_rusage);
1307 error = copyout((caddr_t)&my_rusage,
1308 uap->rusage,
1309 sizeof (my_rusage));
1310 }
1311 }
1312 /* information unavailable? */
1313 if (error)
1314 goto out;
1315 }
1316
1317 /* Conformance change for 6577252.
1318 * When SIGCHLD is blocked and wait() returns because the status
1319 * of a child process is available and there are no other
1320 * child processes, then any pending SIGCHLD signal is cleared.
1321 */
1322 if ( sibling_count == 0 ) {
1323 int mask = sigmask(SIGCHLD);
1324 uth = current_uthread();
1325
1326 if ( (uth->uu_sigmask & mask) != 0 ) {
1327 /* We are blocking SIGCHLD signals; clear any pending SIGCHLD.
1328 * This locking looks funny but it is protecting access to the
1329 * thread via p_uthlist.
1330 */
1331 proc_lock(q);
1332 uth->uu_siglist &= ~mask; /* clear pending signal */
1333 proc_unlock(q);
1334 }
1335 }
1336
1337 /* Clean up */
1338 (void)reap_child_locked(q, p, 0, reparentedtoinit, 0, 0);
1339
1340 return (0);
1341 }
1342 if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
1343 (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
1344 proc_list_unlock();
1345 #if CONFIG_MACF
1346 if ((error = mac_proc_check_wait(q, p)) != 0)
1347 goto out;
1348 #endif
1349 proc_lock(p);
1350 p->p_lflag |= P_LWAITED;
1351 proc_unlock(p);
1352 retval[0] = p->p_pid;
1353 if (uap->status) {
1354 status = W_STOPCODE(p->p_xstat);
1355 error = copyout((caddr_t)&status,
1356 uap->status,
1357 sizeof(status));
1358 } else
1359 error = 0;
1360 goto out;
1361 }
1362 /*
1363 * If we are waiting for continued processes, and this
1364 * process was continued
1365 */
1366 if ((uap->options & WCONTINUED) &&
1367 (p->p_flag & P_CONTINUED)) {
1368 proc_list_unlock();
1369 #if CONFIG_MACF
1370 if ((error = mac_proc_check_wait(q, p)) != 0)
1371 goto out;
1372 #endif
1373
1374 /* Prevent other processes from waiting for this event */
1375 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
1376 retval[0] = p->p_pid;
1377 if (uap->status) {
1378 status = W_STOPCODE(SIGCONT);
1379 error = copyout((caddr_t)&status,
1380 uap->status,
1381 sizeof(status));
1382 } else
1383 error = 0;
1384 goto out;
1385 }
1386 p->p_listflag &= ~P_LIST_WAITING;
1387 wakeup(&p->p_stat);
1388 }
1389 /* list lock is held when we get here any which way */
1390 if (nfound == 0) {
1391 proc_list_unlock();
1392 return (ECHILD);
1393 }
1394
1395 if (uap->options & WNOHANG) {
1396 retval[0] = 0;
1397 proc_list_unlock();
1398 return (0);
1399 }
1400
1401 /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
1402 uth = current_uthread();
1403 wait4_data = &uth->uu_kevent.uu_wait4_data;
1404 wait4_data->args = uap;
1405 wait4_data->retval = retval;
1406
1407 if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue)))
1408 return (error);
1409
1410 goto loop;
1411 out:
1412 proc_list_lock();
1413 p->p_listflag &= ~P_LIST_WAITING;
1414 wakeup(&p->p_stat);
1415 proc_list_unlock();
1416 return (error);
1417 }
1418
1419 #if DEBUG
1420 #define ASSERT_LCK_MTX_OWNED(lock) \
1421 lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
1422 #else
1423 #define ASSERT_LCK_MTX_OWNED(lock) /* nothing */
1424 #endif
1425
1426 int
1427 waitidcontinue(int result)
1428 {
1429 proc_t p;
1430 thread_t thread;
1431 uthread_t uth;
1432 struct _waitid_data *waitid_data;
1433 struct waitid_nocancel_args *uap;
1434 int *retval;
1435
1436 if (result)
1437 return (result);
1438
1439 p = current_proc();
1440 thread = current_thread();
1441 uth = (struct uthread *)get_bsdthread_info(thread);
1442
1443 waitid_data = &uth->uu_kevent.uu_waitid_data;
1444 uap = waitid_data->args;
1445 retval = waitid_data->retval;
1446 return(waitid_nocancel(p, uap, retval));
1447 }
1448
1449 /*
1450 * Description: Suspend the calling thread until one child of the process
1451 * containing the calling thread changes state.
1452 *
1453 * Parameters: uap->idtype one of P_PID, P_PGID, P_ALL
1454 * uap->id pid_t or gid_t or ignored
1455 * uap->infop Address of siginfo_t struct in
1456 * user space into which to return status
1457 * uap->options flag values
1458 *
1459 * Returns: 0 Success
1460 * !0 Error returning status to user space
1461 */
1462 int
1463 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
1464 {
1465 __pthread_testcancel(1);
1466 return (waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval));
1467 }
1468
1469 int
1470 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
1471 __unused int32_t *retval)
1472 {
1473 user_siginfo_t siginfo; /* siginfo data to return to caller */
1474 boolean_t caller64 = IS_64BIT_PROCESS(q);
1475 int nfound;
1476 proc_t p;
1477 int error;
1478 uthread_t uth;
1479 struct _waitid_data *waitid_data;
1480
1481 if (uap->options == 0 ||
1482 (uap->options & ~(WNOHANG|WNOWAIT|WCONTINUED|WSTOPPED|WEXITED)))
1483 return (EINVAL); /* bits set that aren't recognized */
1484
1485 switch (uap->idtype) {
1486 case P_PID: /* child with process ID equal to... */
1487 case P_PGID: /* child with process group ID equal to... */
1488 if (((int)uap->id) < 0)
1489 return (EINVAL);
1490 break;
1491 case P_ALL: /* any child */
1492 break;
1493 }
1494
1495 loop:
1496 proc_list_lock();
1497 loop1:
1498 nfound = 0;
1499 for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
1500
1501 switch (uap->idtype) {
1502 case P_PID: /* child with process ID equal to... */
1503 if (p->p_pid != (pid_t)uap->id)
1504 continue;
1505 break;
1506 case P_PGID: /* child with process group ID equal to... */
1507 if (p->p_pgrpid != (pid_t)uap->id)
1508 continue;
1509 break;
1510 case P_ALL: /* any child */
1511 break;
1512 }
1513
1514 /* XXX This is racy because we don't get the lock!!!! */
1515
1516 /*
1517 * Wait collision; go to sleep and restart; used to maintain
1518 * the single return for waited process guarantee.
1519 */
1520 if (p->p_listflag & P_LIST_WAITING) {
1521 (void) msleep(&p->p_stat, proc_list_mlock,
1522 PWAIT, "waitidcoll", 0);
1523 goto loop1;
1524 }
1525 p->p_listflag |= P_LIST_WAITING; /* mark busy */
1526
1527 nfound++;
1528
1529 bzero(&siginfo, sizeof (siginfo));
1530
1531 switch (p->p_stat) {
1532 case SZOMB: /* Exited */
1533 if (!(uap->options & WEXITED))
1534 break;
1535 proc_list_unlock();
1536 #if CONFIG_MACF
1537 if ((error = mac_proc_check_wait(q, p)) != 0)
1538 goto out;
1539 #endif
1540 siginfo.si_signo = SIGCHLD;
1541 siginfo.si_pid = p->p_pid;
1542 siginfo.si_status = WEXITSTATUS(p->p_xstat);
1543 if (WIFSIGNALED(p->p_xstat)) {
1544 siginfo.si_code = WCOREDUMP(p->p_xstat) ?
1545 CLD_DUMPED : CLD_KILLED;
1546 } else
1547 siginfo.si_code = CLD_EXITED;
1548
1549 if ((error = copyoutsiginfo(&siginfo,
1550 caller64, uap->infop)) != 0)
1551 goto out;
1552
1553 /* Prevent other processes from waiting for this event? */
1554 if (!(uap->options & WNOWAIT)) {
1555 (void) reap_child_locked(q, p, 0, 0, 0, 0);
1556 return (0);
1557 }
1558 goto out;
1559
1560 case SSTOP: /* Stopped */
1561 /*
1562 * If we are not interested in stopped processes, then
1563 * ignore this one.
1564 */
1565 if (!(uap->options & WSTOPPED))
1566 break;
1567
1568 /*
1569 * If someone has already waited for it, we lost a race
1570 * to be the one to return status.
1571 */
1572 if ((p->p_lflag & P_LWAITED) != 0)
1573 break;
1574 proc_list_unlock();
1575 #if CONFIG_MACF
1576 if ((error = mac_proc_check_wait(q, p)) != 0)
1577 goto out;
1578 #endif
1579 siginfo.si_signo = SIGCHLD;
1580 siginfo.si_pid = p->p_pid;
1581 siginfo.si_status = p->p_xstat; /* signal number */
1582 siginfo.si_code = CLD_STOPPED;
1583
1584 if ((error = copyoutsiginfo(&siginfo,
1585 caller64, uap->infop)) != 0)
1586 goto out;
1587
1588 /* Prevent other processes from waiting for this event? */
1589 if (!(uap->options & WNOWAIT)) {
1590 proc_lock(p);
1591 p->p_lflag |= P_LWAITED;
1592 proc_unlock(p);
1593 }
1594 goto out;
1595
1596 default: /* All other states => Continued */
1597 if (!(uap->options & WCONTINUED))
1598 break;
1599
1600 /*
1601 * If the flag isn't set, then this process has not
1602 * been stopped and continued, or the status has
1603 * already been reaped by another caller of waitid().
1604 */
1605 if ((p->p_flag & P_CONTINUED) == 0)
1606 break;
1607 proc_list_unlock();
1608 #if CONFIG_MACF
1609 if ((error = mac_proc_check_wait(q, p)) != 0)
1610 goto out;
1611 #endif
1612 siginfo.si_signo = SIGCHLD;
1613 siginfo.si_code = CLD_CONTINUED;
1614 proc_lock(p);
1615 siginfo.si_pid = p->p_contproc;
1616 siginfo.si_status = p->p_xstat;
1617 proc_unlock(p);
1618
1619 if ((error = copyoutsiginfo(&siginfo,
1620 caller64, uap->infop)) != 0)
1621 goto out;
1622
1623 /* Prevent other processes from waiting for this event? */
1624 if (!(uap->options & WNOWAIT)) {
1625 OSBitAndAtomic(~((uint32_t)P_CONTINUED),
1626 &p->p_flag);
1627 }
1628 goto out;
1629 }
1630 ASSERT_LCK_MTX_OWNED(proc_list_mlock);
1631
1632 /* Not a process we are interested in; go on to next child */
1633
1634 p->p_listflag &= ~P_LIST_WAITING;
1635 wakeup(&p->p_stat);
1636 }
1637 ASSERT_LCK_MTX_OWNED(proc_list_mlock);
1638
1639 /* No child processes that could possibly satisfy the request? */
1640
1641 if (nfound == 0) {
1642 proc_list_unlock();
1643 return (ECHILD);
1644 }
1645
1646 if (uap->options & WNOHANG) {
1647 proc_list_unlock();
1648 #if CONFIG_MACF
1649 if ((error = mac_proc_check_wait(q, p)) != 0)
1650 return (error);
1651 #endif
1652 /*
1653 * The state of the siginfo structure in this case
1654 * is undefined. Some implementations bzero it, some
1655 * (like here) leave it untouched for efficiency.
1656 *
1657 * Thus the most portable check for "no matching pid with
1658 * WNOHANG" is to store a zero into si_pid before
1659 * invocation, then check for a non-zero value afterwards.
1660 */
1661 return (0);
1662 }
1663
1664 /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
1665 uth = current_uthread();
1666 waitid_data = &uth->uu_kevent.uu_waitid_data;
1667 waitid_data->args = uap;
1668 waitid_data->retval = retval;
1669
1670 if ((error = msleep0(q, proc_list_mlock,
1671 PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0)
1672 return (error);
1673
1674 goto loop;
1675 out:
1676 proc_list_lock();
1677 p->p_listflag &= ~P_LIST_WAITING;
1678 wakeup(&p->p_stat);
1679 proc_list_unlock();
1680 return (error);
1681 }
1682
1683 /*
1684 * make process 'parent' the new parent of process 'child'.
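 * 'locked' indicates the caller already holds proc_list_lock; 'cansignal'
 * permits a SIGCHLD to initproc when it inherits a zombie child.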
1685 */
1686 void
1687 proc_reparentlocked(proc_t child, proc_t parent, int cansignal, int locked)
1688 {
1689 proc_t oldparent = PROC_NULL;
1690
1691 if (child->p_pptr == parent)
1692 return;
1693
1694 if (locked == 0)
1695 proc_list_lock();
1696
1697 oldparent = child->p_pptr;
1698 #if __PROC_INTERNAL_DEBUG
1699 if (oldparent == PROC_NULL)
1700 panic("proc_reparent: process %p does not have a parent\n", child);
1701 #endif
1702
1703 LIST_REMOVE(child, p_sibling);
1704 #if __PROC_INTERNAL_DEBUG
1705 if (oldparent->p_childrencnt == 0)
1706 panic("process children count already 0\n");
1707 #endif
1708 oldparent->p_childrencnt--;
1709 #if __PROC_INTERNAL_DEBUG1
1710 if (oldparent->p_childrencnt < 0)
1711 panic("process children count -ve\n");
1712 #endif
1713 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1714 parent->p_childrencnt++;
1715 child->p_pptr = parent;
1716 child->p_ppid = parent->p_pid;
1717
1718 proc_list_unlock();
1719
1720 if ((cansignal != 0) && (initproc == parent) && (child->p_stat == SZOMB))
1721 psignal(initproc, SIGCHLD);
1722 if (locked == 1)
1723 proc_list_lock();
1724 }
1725
1726 /*
1727 * Exit: deallocate address space and other resources, change proc state
1728 * to zombie, and unlink proc from allproc and parent's lists. Save exit
1729 * status and rusage for wait(). Check for child processes and orphan them.
1730 */
1731
1732 void
1733 vfork_exit(proc_t p, int rv)
1734 {
1735 vfork_exit_internal(p, rv, 0);
1736 }
1737
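/*
 * forceexit suppresses the perf-server notification below; vfork_exit()
 * itself always passes 0.
 */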
1738 void
1739 vfork_exit_internal(proc_t p, int rv, int forceexit)
1740 {
1741 thread_t self = current_thread();
1742 #ifdef FIXME
1743 struct task *task = p->task;
1744 #endif
1745 struct uthread *ut;
1746
1747 /*
1748 * If a thread in this task has already
1749 * called exit(), then halt any others
1750 * right here.
1751 */
1752
1753 ut = get_bsdthread_info(self);
1754
1755
1756 proc_lock(p);
1757 if ((p->p_lflag & P_LPEXIT) == P_LPEXIT) {
1758 /*
1759 * This happens when the parent exits or is killed while a vfork is
1760 * in progress in other threads, e.g. shutdown code has already called exit1().
1761 */
1762 proc_unlock(p);
1763 return;
1764 }
1765 p->p_lflag |= (P_LEXIT | P_LPEXIT);
1766 proc_unlock(p);
1767
1768 if (forceexit == 0) {
1769 /*
1770 * parent of a vfork child has already called exit() and the
1771 * thread that has the vfork in progress terminates. So there is no
1772 * separate address space here and it has already been marked for
1773 * termination. This was never covered before and could cause problems
1774 * if we block here for outside code.
1775 */
1776 /* Notify the perf server */
1777 (void)sys_perf_notify(self, p->p_pid);
1778 }
1779
1780 /*
1781 * Remove proc from allproc queue and from pidhash chain.
1782 * Need to do this before we do anything that can block.
1783 * Not doing so causes things like mount() to find this proc on
1784 * allproc in a partially cleaned state.
1785 */
1786
1787 proc_list_lock();
1788
1789 #if CONFIG_MEMORYSTATUS
1790 memorystatus_remove(p, TRUE);
1791 #endif
1792
1793 LIST_REMOVE(p, p_list);
1794 LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
1795 /* will not be visible via proc_find */
1796 p->p_listflag |= P_LIST_EXITED;
1797
1798 proc_list_unlock();
1799
1800 proc_lock(p);
1801 p->p_xstat = rv;
1802 p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
1803 p->p_sigignore = ~0;
1804 proc_unlock(p);
1805
1806 proc_spinlock(p);
1807 if (thread_call_cancel(p->p_rcall))
1808 p->p_ractive--;
1809
1810 while (p->p_ractive > 0) {
1811 proc_spinunlock(p);
1812
1813 delay(1);
1814
1815 proc_spinlock(p);
1816 }
1817 proc_spinunlock(p);
1818
1819 thread_call_free(p->p_rcall);
1820 p->p_rcall = NULL;
1821
1822 ut->uu_siglist = 0;
1823
1824 vproc_exit(p);
1825 }
1826
1827 void
1828 vproc_exit(proc_t p)
1829 {
1830 proc_t q;
1831 proc_t pp;
1832
1833 vnode_t tvp;
1834 #ifdef FIXME
1835 struct task *task = p->task;
1836 #endif
1837 struct pgrp * pg;
1838 struct session *sessp;
1839 struct rusage_superset *rup;
1840
1841 /* XXX Zombie allocation may fail, in which case stats get lost */
1842 MALLOC_ZONE(rup, struct rusage_superset *,
1843 sizeof (*rup), M_ZOMBIE, M_WAITOK);
1844
1845 proc_refdrain(p);
1846
1847 /*
1848 * Close open files and release open-file table.
1849 * This may block!
1850 */
1851 fdfree(p);
1852
1853 sessp = proc_session(p);
1854 if (SESS_LEADER(p, sessp)) {
1855
1856 if (sessp->s_ttyvp != NULLVP) {
1857 struct vnode *ttyvp;
1858 int ttyvid;
1859 int cttyflag = 0;
1860 struct vfs_context context;
1861 struct tty *tp;
1862
1863 /*
1864 * Controlling process.
1865 * Signal foreground pgrp,
1866 * drain controlling terminal
1867 * and revoke access to controlling terminal.
1868 */
1869 session_lock(sessp);
1870 tp = SESSION_TP(sessp);
1871 if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
1872 session_unlock(sessp);
1873
1874 /*
1875 * We're going to SIGHUP the foreground process
1876 * group. It can't change from this point on
1877 * until the revoke is complete.
1878 * The process group changes under both the tty
1879 * lock and proc_list_lock, but we need only one of them.
1880 */
1881 tty_lock(tp);
1882 ttysetpgrphup(tp);
1883 tty_unlock(tp);
1884
1885 tty_pgsignal(tp, SIGHUP, 1);
1886
1887 session_lock(sessp);
1888 tp = SESSION_TP(sessp);
1889 }
1890 cttyflag = sessp->s_flags & S_CTTYREF;
1891 sessp->s_flags &= ~S_CTTYREF;
1892 ttyvp = sessp->s_ttyvp;
1893 ttyvid = sessp->s_ttyvid;
1894 sessp->s_ttyvp = NULL;
1895 sessp->s_ttyvid = 0;
1896 sessp->s_ttyp = TTY_NULL;
1897 sessp->s_ttypgrpid = NO_PID;
1898 session_unlock(sessp);
1899
1900 if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
1901 if (tp != TTY_NULL) {
1902 tty_lock(tp);
1903 (void) ttywait(tp);
1904 tty_unlock(tp);
1905 }
1906 context.vc_thread = proc_thread(p); /* XXX */
1907 context.vc_ucred = kauth_cred_proc_ref(p);
1908 VNOP_REVOKE(ttyvp, REVOKEALL, &context);
1909 if (cttyflag) {
1910 /*
1911 * Release the extra usecount taken in cttyopen.
1912 * usecount should be released after VNOP_REVOKE is called.
1913 * This usecount was taken to ensure that
1914 * the VNOP_REVOKE results in a close to
1915 * the tty since cttyclose is a no-op.
1916 */
1917 vnode_rele(ttyvp);
1918 }
1919 vnode_put(ttyvp);
1920 kauth_cred_unref(&context.vc_ucred);
1921 ttyvp = NULLVP;
1922 }
1923 if (tp) {
1924 /*
1925 * This is cleared even if not set. This is also done in
1926 * spec_close to ensure that the flag is cleared.
1927 */
1928 tty_lock(tp);
1929 ttyclrpgrphup(tp);
1930 tty_unlock(tp);
1931
1932 ttyfree(tp);
1933 }
1934 }
1935 session_lock(sessp);
1936 sessp->s_leader = NULL;
1937 session_unlock(sessp);
1938 }
1939 session_rele(sessp);
1940
1941 pg = proc_pgrp(p);
1942 fixjobc(p, pg, 0);
1943 pg_rele(pg);
1944
1945 p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
1946
1947 proc_list_lock();
1948 proc_childdrainstart(p);
1949 while ((q = p->p_children.lh_first) != NULL) {
1950 if (q->p_stat == SZOMB) {
1951 if (p != q->p_pptr)
1952 panic("parent child linkage broken");
1953 /* check for lookups by zomb sysctl */
1954 while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
1955 msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1956 }
1957 q->p_listflag |= P_LIST_WAITING;
1958 /*
1959 * This is a named reference and it is not granted
1960 * if the reap is already in progress. So we get
1961 * the reference here exclusively and there can be
1962 * no waiters. So there is no need for a wakeup
1963 * after we are done. Also the reap frees the structure,
1964 * and the proc struct cannot be used for wakeups either.
1965 * It is safe to use q here as this is a system reap.
1966 */
1967 (void)reap_child_locked(p, q, 1, 0, 1, 0);
1968 } else {
1969 /*
1970 * Traced processes are killed
1971 * since their existence means someone is messing up.
1972 */
1973 if (q->p_lflag & P_LTRACED) {
1974 struct proc *opp;
1975
1976 proc_list_unlock();
1977
1978 opp = proc_find(q->p_oppid);
1979 if (opp != PROC_NULL) {
1980 proc_list_lock();
1981 q->p_oppid = 0;
1982 proc_list_unlock();
1983 proc_reparentlocked(q, opp, 0, 0);
1984 proc_rele(opp);
1985 } else {
1986 /* original parent exited while traced */
1987 proc_list_lock();
1988 q->p_listflag |= P_LIST_DEADPARENT;
1989 q->p_oppid = 0;
1990 proc_list_unlock();
1991 proc_reparentlocked(q, initproc, 0, 0);
1992 }
1993
1994 proc_lock(q);
1995 q->p_lflag &= ~P_LTRACED;
1996
1997 if (q->sigwait_thread) {
1998 thread_t thread = q->sigwait_thread;
1999
2000 proc_unlock(q);
2001 /*
2002 * The sigwait_thread could be stopped at a
2003 * breakpoint. Wake it up to kill.
2004 * We need to do this because it could be a thread that is
2005 * not the first thread in the task, so any attempt to kill
2006 * the process would result in a deadlock on q->sigwait.
2007 */
2008 thread_resume(thread);
2009 clear_wait(thread, THREAD_INTERRUPTED);
2010 threadsignal(thread, SIGKILL, 0);
2011 } else {
2012 proc_unlock(q);
2013 }
2014
2015 psignal(q, SIGKILL);
2016 proc_list_lock();
2017 } else {
2018 q->p_listflag |= P_LIST_DEADPARENT;
2019 proc_reparentlocked(q, initproc, 0, 1);
2020 }
2021 }
2022 }
2023
2024 proc_childdrainend(p);
2025 proc_list_unlock();
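/*
 * Editorial summary: on exit from the loop above every child has been
 * dealt with in one of three ways: zombies were reaped in place, traced
 * children were reparented (to their original parent or initproc) and
 * killed, and all other children were handed to initproc.
 */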
2026
2027 /*
2028 * Release reference to text vnode
2029 */
2030 tvp = p->p_textvp;
2031 p->p_textvp = NULL;
2032 if (tvp != NULLVP) {
2033 vnode_rele(tvp);
2034 }
2035
2036 /*
2037 * Save exit status and final rusage info, adding in child rusage
2038 * info and self times. If we were unable to allocate a zombie
2039 * structure, this information is lost.
2040 */
2041 if (rup != NULL) {
2042 rup->ru = p->p_stats->p_ru;
2043 timerclear(&rup->ru.ru_utime);
2044 timerclear(&rup->ru.ru_stime);
2045
2046 #ifdef FIXME
2047 if (task) {
2048 mach_task_basic_info_data_t tinfo;
2049 task_thread_times_info_data_t ttimesinfo;
2050 int task_info_stuff, task_ttimes_stuff;
2051 struct timeval ut,st;
2052
2053 task_info_stuff = MACH_TASK_BASIC_INFO_COUNT;
2054 task_info(task, MACH_TASK_BASIC_INFO,
2055 &tinfo, &task_info_stuff);
2056 p->p_ru->ru.ru_utime.tv_sec = tinfo.user_time.seconds;
2057 p->p_ru->ru.ru_utime.tv_usec = tinfo.user_time.microseconds;
2058 p->p_ru->ru.ru_stime.tv_sec = tinfo.system_time.seconds;
2059 p->p_ru->ru.ru_stime.tv_usec = tinfo.system_time.microseconds;
2060
2061 task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
2062 task_info(task, TASK_THREAD_TIMES_INFO,
2063 &ttimesinfo, &task_ttimes_stuff);
2064
2065 ut.tv_sec = ttimesinfo.user_time.seconds;
2066 ut.tv_usec = ttimesinfo.user_time.microseconds;
2067 st.tv_sec = ttimesinfo.system_time.seconds;
2068 st.tv_usec = ttimesinfo.system_time.microseconds;
2069 timeradd(&ut,&p->p_ru->ru.ru_utime,&p->p_ru->ru.ru_utime);
2070 timeradd(&st,&p->p_ru->ru.ru_stime,&p->p_ru->ru.ru_stime);
2071 }
2072 #endif /* FIXME */
2073
2074 ruadd(&rup->ru, &p->p_stats->p_cru);
2075
2076 gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2077 rup->ri.ri_phys_footprint = 0;
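/*
 * Editorial note (assumption): the physical footprint is reported
 * as zero here because the address space is being torn down and
 * the stale value would be misleading to observers of the zombie.
 */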
2078 rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2079
2080 /*
2081 * Now that we have filled in the rusage info, make it
2082 * visible to an external observer via proc_pid_rusage().
2083 */
2084 p->p_ru = rup;
2085 }
2086
2087 /*
2088 * Free up profiling buffers.
2089 */
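/*
 * Editorial note: the head entry p0 is embedded in p_stats, so it is
 * only reset in place; the chained entries (added by profil(2)-style
 * requests, as an assumption) were allocated separately and are
 * kfree()'d below.
 */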
2090 {
2091 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2092
2093 p1 = p0->pr_next;
2094 p0->pr_next = NULL;
2095 p0->pr_scale = 0;
2096
2097 for (; p1 != NULL; p1 = pn) {
2098 pn = p1->pr_next;
2099 kfree(p1, sizeof *p1);
2100 }
2101 }
2102
2103 #if PSYNCH
2104 pth_proc_hashdelete(p);
2105 #endif /* PSYNCH */
2106
2107 /*
2108 * Other substructures are freed from wait().
2109 */
2110 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
2111 p->p_stats = NULL;
2112
2113 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
2114 p->p_sigacts = NULL;
2115
2116 proc_limitdrop(p, 1);
2117 p->p_limit = NULL;
2118
2119 /*
2120 * Finish up by terminating the task
2121 * and halting this thread (only if it is a
2122 * member of the exiting task).
2123 */
2124 p->task = TASK_NULL;
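/*
 * Editorial note (assumption): clearing p->task severs the BSD-side
 * linkage to the Mach task before the parent is notified, so nothing
 * reaches back into the terminating task through this proc.
 */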
2125
2126 /*
2127 * Notify parent that we're gone.
2128 */
2129 pp = proc_parent(p);
2130 if ((p->p_listflag & P_LIST_DEADPARENT) == 0) {
2131 if (pp != initproc) {
2132 proc_lock(pp);
2133 pp->si_pid = p->p_pid;
2134 pp->si_status = p->p_xstat;
2135 pp->si_code = CLD_EXITED;
2136 /*
2137 * p_ucred usage is safe as this is an exiting process
2138 * and the reference is dropped in reap
2139 */
2140 pp->si_uid = kauth_cred_getruid(p->p_ucred);
2141 proc_unlock(pp);
2142 }
2143 /* mark as a zombie */
2145 /* No need to take the proc lock as all refs are drained and
2146 * no one except the parent (reaping) can look at this.
2147 * The write is to an int and is coherent. Also, the parent is
2148 * keyed off of the list lock for reaping.
2149 */
2150 p->p_stat = SZOMB;
2151
2152 psignal(pp, SIGCHLD);
2153
2154 /* and now wakeup the parent */
2155 proc_list_lock();
2156 wakeup((caddr_t)pp);
2157 proc_list_unlock();
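/*
 * Editorial note (assumption): the parent sleeps on its own proc
 * pointer in the wait*() paths, so the wakeup on (caddr_t)pp above
 * unblocks a parent already blocked in waitpid(2)/wait4(2).
 */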
2158 } else {
2159 proc_list_lock();
2160 /* check for lookups by zomb sysctl */
2161 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2162 msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
2163 }
2164 p->p_stat = SZOMB;
2165 p->p_listflag |= P_LIST_WAITING;
2166
2167 /*
2168 * This is a named reference and it is not granted
2169 * if the reap is already in progress. So we get
2170 * the reference here exclusively and there can be
2171 * no waiters. So there is no need for a wakeup
2172 * after we are done. Also the reap frees the structure,
2173 * and the proc struct cannot be used for wakeups either.
2174 * It is safe to use p here as this is a system reap.
2175 */
2176 (void)reap_child_locked(pp, p, 0, 0, 1, 1);
2177 /* list lock dropped by reap_child_locked */
2178 }
2179 proc_rele(pp);
2180 }
2181
2182
2183 /*
2184 * munge_user64_rusage
2185 * LP64 support - long is 64 bits if we are dealing with a 64-bit user
2186 * process. We munge the kernel version of rusage into the
2187 * 64-bit version.
2188 */
2189 __private_extern__ void
2190 munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
2191 {
2192 /* timeval changes size, so utime and stime need special handling */
2193 a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
2194 a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
2195 a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
2196 a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
2197 /*
2198 * everything else can be a direct assign, since there is no loss
2199 * of precision implied going 32->64.
2200 */
2201 a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
2202 a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
2203 a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
2204 a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
2205 a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
2206 a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
2207 a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
2208 a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
2209 a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
2210 a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
2211 a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
2212 a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
2213 a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
2214 a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
2215 }
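/*
 * Editorial sketch (not in the original source): callers such as the
 * wait4/getrusage paths are expected to pick a munger based on the
 * user process's address-space size, roughly:
 *
 *	if (IS_64BIT_PROCESS(p))
 *		munge_user64_rusage(&kru, &u64_ru);
 *	else
 *		munge_user32_rusage(&kru, &u32_ru);
 *
 * IS_64BIT_PROCESS() is a real xnu macro; the variable names here are
 * illustrative only.
 */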
2216
2217 /* For a 64-bit kernel and 32-bit userspace, munging may be needed */
2218 __private_extern__ void
2219 munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
2220 {
2221 /* timeval changes size, so utime and stime need special handling */
2222 a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
2223 a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
2224 a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
2225 a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
2226 /*
2227 * everything else can be a direct assign. We currently ignore
2228 * any loss of precision going 64->32.
2229 */
2230 a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
2231 a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
2232 a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
2233 a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
2234 a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
2235 a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
2236 a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
2237 a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
2238 a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
2239 a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
2240 a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
2241 a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
2242 a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
2243 a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
2244 }
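/*
 * Editorial note (assumption): in the 32-bit case the assignments above
 * can truncate, e.g. the kernel's long-sized ru_maxrss narrowing to the
 * 32-bit user field; the comment in the function records that this loss
 * is accepted.
 */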