/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc_internal.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/filedesc.h>	/* fdfree */
#if SYSV_SHM
#include <sys/shm_internal.h>	/* shmexit */
#endif
#include <sys/acct.h>		/* acct_process */

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <sys/codesign.h>

#if CONFIG_DTRACE
/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
extern void (*dtrace_fasttrap_exit_ptr)(proc_t);
extern void (*dtrace_helpers_cleanup)(proc_t);
extern void dtrace_lazy_dofs_destroy(proc_t);

#include <sys/dtrace_ptss.h>
#endif

#if CONFIG_MACF
#include <security/mac.h>
#include <sys/syscall.h>
#endif

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <sys/sdt.h>

extern char init_task_failure_data[];
void proc_prepareexit(proc_t p, int rv);
void vfork_exit(proc_t p, int rv);
void vproc_exit(proc_t p);
__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
static int reap_child_locked(proc_t parent, proc_t child, int deadparent, int locked, int droplock);

/*
 * Things which should have prototypes in headers, but don't
 */
void	*get_bsduthreadarg(thread_t);
void	proc_exit(proc_t p);
int	wait1continue(int result);
int	waitidcontinue(int result);
int	*get_bsduthreadrval(thread_t);
kern_return_t sys_perf_notify(thread_t thread, int pid);
kern_return_t abnormal_exit_notify(mach_exception_data_type_t code,
		mach_exception_data_type_t subcode);
void	delay(int);

/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX Should share code with bsd/dev/ppc/unix_signal.c
 */
static void
siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
{
	out->si_signo	= in->si_signo;
	out->si_errno	= in->si_errno;
	out->si_code	= in->si_code;
	out->si_pid	= in->si_pid;
	out->si_uid	= in->si_uid;
	out->si_status	= in->si_status;
	out->si_addr	= CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr	= CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
	out->si_band	= in->si_band;			/* range reduction */
}

static void
siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
{
	out->si_signo	= in->si_signo;
	out->si_errno	= in->si_errno;
	out->si_code	= in->si_code;
	out->si_pid	= in->si_pid;
	out->si_uid	= in->si_uid;
	out->si_status	= in->si_status;
	out->si_addr	= in->si_addr;
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr	= in->si_value.sival_ptr;
	out->si_band	= in->si_band;			/* range reduction */
}

/*
 * exit --
 *	Death of process.
 */
void
exit(proc_t p, struct exit_args *uap, int *retval)
{
	exit1(p, W_EXITCODE(uap->rval, 0), retval);

	/* drop funnel before we return */
	thread_exception_return();
	/* NOTREACHED */
	while (TRUE)
		thread_block(THREAD_CONTINUE_NULL);
	/* NOTREACHED */
}
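
/*
 * Illustrative sketch (assumes the usual BSD encoding of
 * W_EXITCODE(ret, sig) as ((ret << 8) | sig); not part of the original
 * sources): a user-space call such as exit(3) reaches exit1() above with
 * rv = W_EXITCODE(3, 0) = 0x0300, for which WEXITSTATUS(rv) == 3 and
 * WTERMSIG(rv) == 0, i.e. a normal exit rather than death by signal.
 */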

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
int
exit1(proc_t p, int rv, int *retval)
{
	thread_t self = current_thread();
	struct task *task = p->task;
	struct uthread *ut;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);
	if (ut->uu_flag & UT_VFORK) {
		vfork_exit(p, rv);
		vfork_return(p, retval, p->p_pid);
		unix_syscall_return(0);
		/* NOT REACHED */
	}

	/*
	 * The parameter list of audit_syscall_exit() was augmented to
	 * take the Darwin syscall number as the first parameter,
	 * which is currently required by mac_audit_postselect().
	 */

	/*
	 * The BSM token contains two components: an exit status as passed
	 * to exit(), and a return value to indicate what sort of exit it
	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */

	DTRACE_PROC1(exit, int, CLD_EXITED);

	proc_lock(p);
	proc_transstart(p, 1);
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			proc_transend(p, 1);
			if (get_threadtask(self) != task) {
				proc_unlock(p);
				return(0);
			}
			proc_unlock(p);
			thread_terminate(self);
			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	if (p == initproc) {
		proc_unlock(p);
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("%s died\nState at Last Exception:\n\n%s",
			(p->p_comm[0] != '\0' ?
				p->p_comm :
				"launchd"),
			init_task_failure_data);
	}

	p->p_lflag |= P_LEXIT;
	p->p_xstat = rv;

	proc_transend(p, 1);
	proc_unlock(p);

	proc_prepareexit(p, rv);

	/* Last thread to terminate will call proc_exit() */
	task_terminate_internal(task);

	return(0);
}

void
proc_prepareexit(proc_t p, int rv)
{
	mach_exception_data_type_t code, subcode;
	struct uthread *ut;
	thread_t self = current_thread();
	ut = get_bsdthread_info(self);

	/* If a core should be generated, notify crash reporter */
	if (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0)) {
		/*
		 * Workaround for processes checking up on PT_DENY_ATTACH:
		 * should be backed out post-Leopard (details in 5431025).
		 */
		if ((SIGSEGV == WTERMSIG(rv)) &&
				(p->p_pptr->p_lflag & P_LNOATTACH)) {
			goto skipcheck;
		}

		/*
		 * Crash Reporter looks for the signal value, original exception
		 * type, and low 20 bits of the original code in code[0]
		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
		 */
		code = ((WTERMSIG(rv) & 0xff) << 24) |
			((ut->uu_exception & 0x0f) << 20) |
			((int)ut->uu_code & 0xfffff);
		subcode = ut->uu_subcode;
		(void) abnormal_exit_notify(code, subcode);
	}

skipcheck:
	/* Notify the perf server */
	(void)sys_perf_notify(self, p->p_pid);

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this on allproc
	 * in a partially cleaned state.
	 */

	proc_list_lock();

	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	/* will not be visible via proc_find */
	p->p_listflag |= P_LIST_EXITED;

	proc_list_unlock();

#ifdef PGINPROF
	vmsizmon();
#endif
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_LPPWAIT is set; we will wakeup the parent below.
	 */
	proc_lock(p);
	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
	p->p_sigignore = ~(sigcantmask);
	ut->uu_siglist = 0;
	proc_unlock(p);
}
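
/*
 * Worked example of the crash-reporter code word built above (a sketch,
 * assuming the common Mach values EXC_BAD_ACCESS == 1 and
 * KERN_INVALID_ADDRESS == 1; not part of the original sources): for a
 * SIGSEGV (11) whose original exception was EXC_BAD_ACCESS with code
 * KERN_INVALID_ADDRESS,
 *
 *	code = ((11 & 0xff) << 24) | ((1 & 0x0f) << 20) | (1 & 0xfffff)
 *	     = 0x0B000000 | 0x00100000 | 0x00000001
 *	     = 0x0B100001
 *
 * so Crash Reporter can recover the signal, the exception type, and the
 * low 20 bits of the exception code from the single value in code[0].
 */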

void
proc_exit(proc_t p)
{
	proc_t q;
	proc_t pp;
	struct task *task = p->task;
	vnode_t tvp = NULLVP;
	struct pgrp * pg;
	struct session *sessp;
	struct uthread * uth;
	pid_t pid;
	int exitval;

	uth = (struct uthread *)get_bsdthread_info(current_thread());

	proc_lock(p);
	proc_transstart(p, 1);
	if (!(p->p_lflag & P_LEXIT)) {
		/*
		 * This can happen if a thread_terminate() occurs
		 * in a single-threaded process.
		 */
		p->p_lflag |= P_LEXIT;
		proc_transend(p, 1);
		proc_unlock(p);
		proc_prepareexit(p, 0);
		(void) task_terminate_internal(task);
		proc_lock(p);
	} else {
		proc_transend(p, 1);
	}

	p->p_lflag |= P_LPEXIT;

	/*
	 * Other kernel threads may be in the middle of signalling this process.
	 * Wait for those threads to wrap it up before making the process
	 * disappear on them.
	 */
	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
		p->p_sigwaitcnt++;
		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1))
			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
		p->p_sigwaitcnt--;
	}

	proc_unlock(p);
	pid = p->p_pid;
	exitval = p->p_xstat;
	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
			      pid, exitval, 0, 0, 0);

#if CONFIG_DTRACE
	/*
	 * Free any outstanding lazy dof entries. It is imperative we
	 * always call dtrace_lazy_dofs_destroy, rather than null check
	 * and call if !NULL. If we NULL test, during lazy dof faulting
	 * we can race with the faulting code and proceed from here to
	 * beyond the helpers cleanup. The lazy dof faulting will then
	 * install new helpers which will never be cleaned up, and leak.
	 */
	dtrace_lazy_dofs_destroy(p);

	/*
	 * Clean up any DTrace helper actions or probes for the process.
	 */
	if (p->p_dtrace_helpers != NULL) {
		(*dtrace_helpers_cleanup)(p);
	}

	/*
	 * Clean up any DTrace probes associated with this process.
	 */
	/*
	 * APPLE NOTE: We release ptss pages/entries in dtrace_fasttrap_exit_ptr(),
	 * call this after dtrace_helpers_cleanup()
	 */
	proc_lock(p);
	if (p->p_dtrace_probes && dtrace_fasttrap_exit_ptr) {
		(*dtrace_fasttrap_exit_ptr)(p);
	}
	proc_unlock(p);
#endif

	/* XXX Zombie allocation may fail, in which case stats get lost */
	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	nspace_proc_exit(p);

	/*
	 * need to cancel async IO requests that can be cancelled and wait for those
	 * already active.  MAY BLOCK!
	 */

	proc_refdrain(p);

	workqueue_exit(p);

	_aio_exit( p );

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	if (uth->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while flushing files on close
		 * collided with normal I/O operations...
		 * no need to throttle this thread since it's going away
		 * but we do need to update our bookkeeping w.r.t. throttled threads
		 */
		throttle_lowpri_io(FALSE);
	}

#if SYSV_SHM
	/* Close ref SYSV Shared memory */
	if (p->vm_shm)
		shmexit(p);
#endif
#if SYSV_SEM
	/* Release SYSV semaphores */
	semexit(p);
#endif

#if PSYNCH
	pth_proc_hashdelete(p);
#endif /* PSYNCH */

	sessp = proc_session(p);
	if (SESS_LEADER(p, sessp)) {

		if (sessp->s_ttyvp != NULLVP) {
			struct vnode *ttyvp;
			int ttyvid;
			struct vfs_context context;
			struct tty * tp;

			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			tp = SESSION_TP(sessp);

			if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
				tty_pgsignal(tp, SIGHUP, 1);

				session_lock(sessp);
				/* reget tp, which may have changed due to revocation */
				tp = SESSION_TP(sessp);
				ttyvp = sessp->s_ttyvp;
				ttyvid = sessp->s_ttyvid;
				sessp->s_ttyvp = NULLVP;
				sessp->s_ttyvid = 0;
				sessp->s_ttyp = TTY_NULL;
				sessp->s_ttypgrpid = NO_PID;
				session_unlock(sessp);

				if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {

					if (tp != TTY_NULL) {
						tty_lock(tp);
						(void) ttywait(tp);
						tty_unlock(tp);
					}
					context.vc_thread = proc_thread(p); /* XXX */
					context.vc_ucred = kauth_cred_proc_ref(p);
					VNOP_REVOKE(ttyvp, REVOKEALL, &context);
					vnode_put(ttyvp);
					kauth_cred_unref(&context.vc_ucred);
				}
			} else {
				session_lock(sessp);
				/* reget tp, which may have changed due to revocation */
				tp = SESSION_TP(sessp);
				ttyvp = sessp->s_ttyvp;
				sessp->s_ttyvp = NULLVP;
				sessp->s_ttyvid = 0;
				sessp->s_ttyp = TTY_NULL;
				sessp->s_ttypgrpid = NO_PID;
				session_unlock(sessp);
			}
			if (ttyvp)
				vnode_rele(ttyvp);
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}

		session_lock(sessp);
		sessp->s_leader = NULL;
		session_unlock(sessp);
	}
	session_rele(sessp);

	pg = proc_pgrp(p);
	fixjobc(p, pg, 0);
	pg_rele(pg);

	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	(void)acct_process(p);

	proc_list_lock();

	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
		p->p_listflag &= ~P_LIST_EXITCOUNT;
		proc_shutdown_exitcount--;
		if (proc_shutdown_exitcount == 0)
			wakeup(&proc_shutdown_exitcount);
	}

	/* wait till parentrefs are dropped and grant no more */
	proc_childdrainstart(p);
	while ((q = p->p_children.lh_first) != NULL) {
		q->p_listflag |= P_LIST_DEADPARENT;
		if (q->p_stat == SZOMB) {
			if (p != q->p_pptr)
				panic("parent child linkage broken");
			/* check for sysctl zomb lookup */
			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
				msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
			}
			q->p_listflag |= P_LIST_WAITING;
			/*
			 * This is a named reference and it is not granted
			 * if the reap is already in progress. So we get
			 * the reference here exclusively and there can be
			 * no waiters. So there is no need for a wakeup
			 * after we are done. Also the reap frees the structure,
			 * so the proc struct cannot be used for wakeups either.
			 * It is safe to use q here as this is a system reap.
			 */
			(void)reap_child_locked(p, q, 1, 1, 0);
		} else {
			proc_reparentlocked(q, initproc, 0, 1);
			/*
			 * Traced processes are killed
			 * since their existence means someone is messing up.
			 */
			if (q->p_lflag & P_LTRACED) {
				/*
				 * Take a reference on the child process to
				 * ensure it doesn't exit and disappear between
				 * the time we drop the list_lock and attempt
				 * to acquire its proc_lock.
				 */
				if (proc_ref_locked(q) != q)
					continue;

				proc_list_unlock();
				proc_lock(q);
				q->p_lflag &= ~P_LTRACED;
				if (q->sigwait_thread) {
					thread_t thread = q->sigwait_thread;

					proc_unlock(q);
					/*
					 * The sigwait_thread could be stopped at a
					 * breakpoint. Wake it up to kill.
					 * Need to do this as it could be a thread which is not
					 * the first thread in the task. So any attempts to kill
					 * the process would result in a deadlock on q->sigwait.
					 */
					thread_resume(thread);
					clear_wait(thread, THREAD_INTERRUPTED);
					threadsignal(thread, SIGKILL, 0);
				} else {
					proc_unlock(q);
				}

				psignal(q, SIGKILL);
				proc_list_lock();
				proc_rele_locked(q);
			}
		}
	}

	proc_childdrainend(p);
	proc_list_unlock();

	/*
	 * Release reference to text vnode
	 */
	tvp = p->p_textvp;
	p->p_textvp = NULL;
	if (tvp != NULLVP) {
		vnode_rele(tvp);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.  If we were unable to allocate a zombie
	 * structure, this information is lost.
	 */
	/* No need for locking here as no one other than this thread can access this */
	if (p->p_ru != NULL) {
		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
		*p->p_ru = p->p_stats->p_ru;

		ruadd(p->p_ru, &p->p_stats->p_cru);
	}

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree(p1, sizeof *p1);
		}
	}

	proc_spinlock(p);
	if (thread_call_cancel(p->p_rcall))
		p->p_ractive--;

	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}
	proc_spinunlock(p);

	thread_call_free(p->p_rcall);
	p->p_rcall = NULL;

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
	p->p_sigacts = NULL;

	proc_limitdrop(p, 1);
	p->p_limit = NULL;

	/*
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).
	 */
	p->task = TASK_NULL;
	set_bsdtask_info(task, NULL);

	/* exit status will be seen by parent process */
	proc_knote(p, NOTE_EXIT | (p->p_xstat & 0xffff));

	/* mark the thread as the one that is doing proc_exit
	 * no need to hold proc lock in uthread_free
	 */
	uth->uu_flag |= UT_PROCEXIT;
	/*
	 * Notify parent that we're gone.
	 */
	pp = proc_parent(p);
	if (pp->p_flag & P_NOCLDWAIT) {

#if 3839178
		/*
		 * If the parent is ignoring SIGCHLD, then POSIX requires
		 * us to not add the resource usage to the parent process -
		 * we are only going to hand it off to init to get reaped.
		 * We should contest the standard in this case on the basis
		 * of RLIMIT_CPU.
		 */
#else	/* !3839178 */
		/*
		 * Add child resource usage to parent before giving
		 * zombie to init.  If we were unable to allocate a
		 * zombie structure, this information is lost.
		 */
		if (p->p_ru != NULL) {
			proc_lock(pp);
			ruadd(&pp->p_stats->p_cru, p->p_ru);
			proc_unlock(pp);
		}
#endif	/* !3839178 */

		/* kernel can reap this one, no need to move it to launchd */
		proc_list_lock();
		p->p_listflag |= P_LIST_DEADPARENT;
		proc_list_unlock();
	}
	if ((p->p_listflag & P_LIST_DEADPARENT) == 0) {
		if (pp != initproc) {
			proc_lock(pp);
			pp->si_pid = p->p_pid;
			pp->si_status = p->p_xstat;
			pp->si_code = CLD_EXITED;
			/*
			 * p_ucred usage is safe as it is an exiting process
			 * and reference is dropped in reap
			 */
			pp->si_uid = kauth_cred_getruid(p->p_ucred);
			proc_unlock(pp);
		}
		/* mark as a zombie */
		/* No need to take proc lock as all refs are drained and
		 * no one except parent (reaping) can look at this.
		 * The write is to an int and is coherent. Also parent is
		 * keyed off of list lock for reaping
		 */
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
				      pid, exitval, 0, 0, 0);
		p->p_stat = SZOMB;
		/*
		 * The current process can be reaped, so no one
		 * can depend on this
		 */

		psignal(pp, SIGCHLD);

		/* and now wakeup the parent */
		proc_list_lock();
		wakeup((caddr_t)pp);
		proc_list_unlock();
	} else {
		/* should be fine as parent proc would be initproc */
		/* mark as a zombie */
		/* No need to take proc lock as all refs are drained and
		 * no one except parent (reaping) can look at this.
		 * The write is to an int and is coherent. Also parent is
		 * keyed off of list lock for reaping
		 */
		proc_list_lock();
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
				      pid, exitval, 0, 0, 0);
		/* check for sysctl zomb lookup */
		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
			msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
		}
		/* safe to use p as this is a system reap */
		p->p_stat = SZOMB;
		p->p_listflag |= P_LIST_WAITING;

		/*
		 * This is a named reference and it is not granted
		 * if the reap is already in progress. So we get
		 * the reference here exclusively and there can be
		 * no waiters. So there is no need for a wakeup
		 * after we are done. Also the reap frees the structure,
		 * so the proc struct cannot be used for wakeups either.
		 * It is safe to use p here as this is a system reap.
		 */
		(void)reap_child_locked(pp, p, 1, 1, 1);
		/* list lock dropped by reap_child_locked */
	}
	if (uth->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type and we've
		 * somehow picked up another throttle during exit processing...
		 * no need to throttle this thread since it's going away
		 * but we do need to update our bookkeeping w.r.t. throttled threads
		 */
		throttle_lowpri_io(FALSE);
	}

	proc_rele(pp);

}


/*
 * reap_child_locked
 *
 * Description:	Given a process from which all status information needed
 *		has already been extracted, if the process is a ptrace
 *		attach process, detach it and give it back to its real
 *		parent, else recover all resources remaining associated
 *		with it.
 *
 * Parameters:	proc_t parent		Parent of process being reaped
 *		proc_t child		Process to reap
 *
 * Returns:	0			Process was not reaped because it
 *					came from an attach
 *		1			Process was reaped
 */
static int
reap_child_locked(proc_t parent, proc_t child, int deadparent, int locked, int droplock)
{
	proc_t trace_parent = PROC_NULL;	/* Traced parent process, if tracing */

	if (locked == 1)
		proc_list_unlock();

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 *
	 * Exception: someone who has been reparented to launchd before being
	 * ptraced can simply be reaped, refer to radar 5677288
	 *	p_oppid			-> ptraced
	 *	trace_parent == initproc -> away from launchd
	 *	P_LIST_DEADPARENT	-> came to launchd by reparenting
	 */
	if (child->p_oppid && (trace_parent = proc_find(child->p_oppid))
			&& !((trace_parent == initproc) && (child->p_lflag & P_LIST_DEADPARENT))) {
		proc_lock(child);
		child->p_oppid = 0;
		proc_unlock(child);
		if (trace_parent != initproc) {
			/*
			 * proc internal fields and p_ucred usage are safe
			 * here as child is dead and is not reaped or
			 * reparented yet
			 */
			proc_lock(trace_parent);
			trace_parent->si_pid = child->p_pid;
			trace_parent->si_status = child->p_xstat;
			trace_parent->si_code = CLD_CONTINUED;
			trace_parent->si_uid = kauth_cred_getruid(child->p_ucred);
			proc_unlock(trace_parent);
		}
		proc_reparentlocked(child, trace_parent, 1, 0);
		psignal(trace_parent, SIGCHLD);
		proc_list_lock();
		wakeup((caddr_t)trace_parent);
		child->p_listflag &= ~P_LIST_WAITING;
		wakeup(&child->p_stat);
		proc_list_unlock();
		proc_rele(trace_parent);
		if ((locked == 1) && (droplock == 0))
			proc_list_lock();
		return (0);
	}

	if (trace_parent != PROC_NULL) {
		proc_rele(trace_parent);
	}

	proc_knote(child, NOTE_REAP);
	proc_knote_drain(child);

	child->p_xstat = 0;
	if (child->p_ru) {
		proc_lock(parent);
#if 3839178
		/*
		 * If the parent is ignoring SIGCHLD, then POSIX requires
		 * us to not add the resource usage to the parent process -
		 * we are only going to hand it off to init to get reaped.
		 * We should contest the standard in this case on the basis
		 * of RLIMIT_CPU.
		 */
		if (!(parent->p_flag & P_NOCLDWAIT))
#endif	/* 3839178 */
			ruadd(&parent->p_stats->p_cru, child->p_ru);
		proc_unlock(parent);
		FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
		child->p_ru = NULL;
	} else {
		printf("Warning : lost p_ru for %s\n", child->p_comm);
	}

	AUDIT_SESSION_PROCEXIT(child);

	/*
	 * Decrement the count of procs running with this uid.
	 * p_ucred usage is safe here as it is an exited process,
	 * and the reference is dropped after these calls down below
	 * (locking protection is provided by list lock held in chgproccnt)
	 */
	(void)chgproccnt(kauth_cred_getruid(child->p_ucred), -1);

#if CONFIG_LCTX
	ALLLCTX_LOCK;
	leavelctx(child);
	ALLLCTX_UNLOCK;
#endif

	/*
	 * Free up credentials.
	 */
	if (IS_VALID_CRED(child->p_ucred)) {
		kauth_cred_unref(&child->p_ucred);
	}

	/* XXXX Note NOT SAFE TO USE p_ucred from this point onwards */

	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(child);

	proc_list_lock();
	LIST_REMOVE(child, p_list);	/* off zombproc */
	parent->p_childrencnt--;
	LIST_REMOVE(child, p_sibling);
	/* If there are no more children, wake up the parent */
	if ((deadparent != 0) && (LIST_EMPTY(&parent->p_children)))
		wakeup((caddr_t)parent);	/* with list lock held */
	child->p_listflag &= ~P_LIST_WAITING;
	wakeup(&child->p_stat);

	/* Take it out of process hash */
	LIST_REMOVE(child, p_hash);
	child->p_listflag &= ~P_LIST_INHASH;
	proc_checkdeadrefs(child);
	nprocs--;

	proc_list_unlock();

#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&child->p_mlock, proc_mlock_grp);
	lck_mtx_destroy(&child->p_fdmlock, proc_fdmlock_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&child->p_slock, proc_slock_grp);
#else /* CONFIG_FINE_LOCK_GROUPS */
	lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
	lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&child->p_slock, proc_lck_grp);
#endif /* CONFIG_FINE_LOCK_GROUPS */
	workqueue_destroy_lock(child);

	FREE_ZONE(child, sizeof *child, M_PROC);
	if ((locked == 1) && (droplock == 0))
		proc_list_lock();

	return (1);
}


int
wait1continue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;
	proc_t p;

	if (result)
		return(result);

	p = current_proc();
	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return(wait4(p, (struct wait4_args *)vt, retval));
}
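
/*
 * Sketch of how the continuation above is reached (the exact control flow
 * lives in msleep0() and the syscall-return path, so this is a hedged
 * summary rather than a definitive description): wait4_nocancel() below
 * blocks with msleep0(..., wait1continue).  When a child changes state or
 * the sleep is interrupted, the thread does not unwind back through the
 * original call stack; instead wait1continue() runs fresh, recovers the
 * original syscall argument block and return-value slot from the uthread
 * via get_bsduthreadarg()/get_bsduthreadrval(), and re-enters wait4() to
 * retry the child scan.
 */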

int
wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return(wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval));
}

int
wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
{
	int nfound;
	int sibling_count;
	proc_t p;
	int status, error;

	AUDIT_ARG(pid, uap->pid);

	if (uap->pid == 0)
		uap->pid = -q->p_pgrpid;

loop:
	proc_list_lock();
loop1:
	nfound = 0;
	sibling_count = 0;

	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		if ( p->p_sibling.le_next != 0 )
			sibling_count++;
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid &&
		    p->p_pgrpid != -(uap->pid))
			continue;

		nfound++;

		/* XXX This is racy because we don't get the lock!!!! */

		if (p->p_listflag & P_LIST_WAITING) {
			(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
			goto loop1;
		}
		p->p_listflag |= P_LIST_WAITING;   /* only allow single thread to wait() */


		if (p->p_stat == SZOMB) {
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0)
				goto out;
#endif
			retval[0] = p->p_pid;
			if (uap->status) {
				/* Legacy apps expect only 8 bits of status */
				status = 0xffff & p->p_xstat;	/* convert to int */
				error = copyout((caddr_t)&status,
						uap->status,
						sizeof(status));
				if (error)
					goto out;
			}
			if (uap->rusage) {
				if (p->p_ru == NULL) {
					error = ENOMEM;
				} else {
					if (IS_64BIT_PROCESS(q)) {
						struct user64_rusage	my_rusage;
						munge_user64_rusage(p->p_ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
							uap->rusage,
							sizeof (my_rusage));
					}
					else {
						struct user32_rusage	my_rusage;
						munge_user32_rusage(p->p_ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
							uap->rusage,
							sizeof (my_rusage));
					}
				}
				/* information unavailable? */
				if (error)
					goto out;
			}

			/* Conformance change for 6577252.
			 * When SIGCHLD is blocked and wait() returns because the status
			 * of a child process is available and there are no other
			 * child processes, then any pending SIGCHLD signal is cleared.
			 */
			if ( sibling_count == 0 ) {
				int mask = sigmask(SIGCHLD);
				uthread_t uth = (struct uthread *)get_bsdthread_info(current_thread());

				if ( (uth->uu_sigmask & mask) != 0 ) {
					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
					 * This locking looks funny but it is protecting access to the
					 * thread via p_uthlist.
					 */
					proc_lock(q);
					uth->uu_siglist &= ~mask;	/* clear pending signal */
					proc_unlock(q);
				}
			}

			/* Clean up */
			(void)reap_child_locked(q, p, 0, 0, 0);

			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0)
				goto out;
#endif
			proc_lock(p);
			p->p_lflag |= P_LWAITED;
			proc_unlock(p);
			retval[0] = p->p_pid;
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout((caddr_t)&status,
					uap->status,
					sizeof(status));
			} else
				error = 0;
			goto out;
		}
		/*
		 * If we are waiting for continued processes, and this
		 * process was continued
		 */
		if ((uap->options & WCONTINUED) &&
		    (p->p_flag & P_CONTINUED)) {
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0)
				goto out;
#endif

			/* Prevent other processes from waiting for this event */
			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
			retval[0] = p->p_pid;
			if (uap->status) {
				status = W_STOPCODE(SIGCONT);
				error = copyout((caddr_t)&status,
					uap->status,
					sizeof(status));
			} else
				error = 0;
			goto out;
		}
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	/* list lock is held when we get here any which way */
	if (nfound == 0) {
		proc_list_unlock();
		return (ECHILD);
	}

	if (uap->options & WNOHANG) {
		retval[0] = 0;
		proc_list_unlock();
		return (0);
	}

	if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue)))
		return (error);

	goto loop;
out:
	proc_list_lock();
	p->p_listflag &= ~P_LIST_WAITING;
	wakeup(&p->p_stat);
	proc_list_unlock();
	return (error);
}


int
waitidcontinue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;

	if (result)
		return(result);

	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return(waitid(current_proc(), (struct waitid_args *)vt, retval));
}

/*
 * Description:	Suspend the calling thread until one child of the process
 *		containing the calling thread changes state.
 *
 * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
 *		uap->id			pid_t or gid_t or ignored
 *		uap->infop		Address of siginfo_t struct in
 *					user space into which to return status
 *		uap->options		flag values
 *
 * Returns:	0			Success
 *		!0			Error returning status to user space
 */
int
waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return(waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval));
}

int
waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap, __unused int32_t *retval)
{
	user_siginfo_t collect64;	/* siginfo data to return to caller */

	int nfound;
	proc_t p;
	int error;

	/*
	 * Forced validation of options for T.waitpid 21; should be a TSD!
	 * This will pass the test, but note that in this case we allow in
	 * more bits than the standard specifies.  The test passes because
	 * they light all the bits, not just the ones we allow, and so the
	 * following check returns EINVAL like the test wants.
	 */
	if (((uap->options & (WNOHANG|WNOWAIT|WCONTINUED|WUNTRACED|WSTOPPED|WEXITED)) != uap->options) ||
	    (uap->options == 0))
		return (EINVAL);	/* bits set that aren't recognized */

	/*
	 * Overly critical options checking, per POSIX
	 */
	switch(uap->idtype) {
	case P_PID:	/* child with process ID equal to... */
	case P_PGID:	/* child with process group ID equal to... */
		if (((int)uap->id) < 0)
			return (EINVAL);
		break;
	case P_ALL:	/* any child */
		break;
	}

loop:
	proc_list_lock();
loop1:
	nfound = 0;
	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		switch(uap->idtype) {
		case P_PID:	/* child with process ID equal to... */
			if (p->p_pid != (pid_t)uap->id)
				continue;
			break;
		case P_PGID:	/* child with process group ID equal to... */
			if (p->p_pgrpid != (pid_t)uap->id)
				continue;
			break;
		case P_ALL:	/* any child */
			break;
		}

		/* XXX This is racy because we don't get the lock!!!! */

		/*
		 * Wait collision; go to sleep and restart; used to maintain
		 * the single return for waited process guarantee.
		 */
		if (p->p_listflag & P_LIST_WAITING) {
			(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitidcoll", 0);
			goto loop1;
		}
		p->p_listflag |= P_LIST_WAITING;		/* mark busy */

		nfound++;

		/*
		 * Types of processes we are interested in
		 *
		 * XXX Don't know what to do for WCONTINUED?!?
		 */
		switch(p->p_stat) {
		case SZOMB:		/* Exited */
			if (!(uap->options & WEXITED))
				break;

			/* drop the lock; the thread is going to return */
			proc_list_unlock();

			/* Collect "siginfo" information for caller */
			collect64.si_signo = SIGCHLD;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr  = 0;
			collect64.si_status = WEXITSTATUS(p->p_xstat);
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				user64_siginfo_t sinfo64;

				siginfo_user_to_user64(&collect64, &sinfo64);

				error = copyout((caddr_t)&sinfo64,
					uap->infop,
					sizeof(sinfo64));
			} else {
				user32_siginfo_t sinfo32;

				siginfo_user_to_user32(&collect64, &sinfo32);

				error = copyout((caddr_t)&sinfo32,
					uap->infop,
					sizeof(sinfo32));
			}
			/* information unavailable? */
			if (error)
				goto out;

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				/* Clean up */
				(void)reap_child_locked(q, p, 0, 0, 0);
			} else {
				proc_list_lock();
				p->p_listflag &= ~P_LIST_WAITING;
				proc_list_unlock();
			}

			return (0);

		case SSTOP:		/* Stopped */
			/*
			 * If we are not interested in stopped processes, then
			 * ignore this one.
			 */
			if (!(uap->options & WSTOPPED))
				break;

			/*
			 * If someone has already waited on it, we lost a race
			 * to be the one to return status.
			 */
			if ((p->p_lflag & P_LWAITED) != 0)
				break;

			/* drop the lock; the thread is going to return */
			proc_list_unlock();

			/* Collect "siginfo" information for caller */
			collect64.si_signo = SIGCHLD;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr  = 0;
			proc_lock(p);
			collect64.si_status = p->p_xstat;
			proc_unlock(p);
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				user64_siginfo_t sinfo64;

				siginfo_user_to_user64(&collect64, &sinfo64);

				error = copyout((caddr_t)&sinfo64,
					uap->infop,
					sizeof(sinfo64));
			} else {
				user32_siginfo_t sinfo32;

				siginfo_user_to_user32(&collect64, &sinfo32);

				error = copyout((caddr_t)&sinfo32,
					uap->infop,
					sizeof(sinfo32));
			}
			/* information unavailable? */
			if (error)
				goto out;

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				proc_lock(p);
				p->p_lflag |= P_LWAITED;
				proc_unlock(p);
			}

			error = 0;
			goto out;

		default:		/* All others */
					/* ...meaning Continued */
			if (!(uap->options & WCONTINUED))
				break;

			/*
			 * If the flag isn't set, then this process has not
			 * been stopped and continued, or the status has
			 * already been reaped by another caller of waitid().
			 */
			if ((p->p_flag & P_CONTINUED) == 0)
				break;

			/* drop the lock; the thread is going to return */
			proc_list_unlock();

			/* Collect "siginfo" information for caller */
			proc_lock(p);
			collect64.si_signo = SIGCHLD;
			collect64.si_code = CLD_CONTINUED;
			collect64.si_errno = 0;
			collect64.si_pid = p->p_contproc;
			collect64.si_uid = 0;
			collect64.si_addr  = 0;
			collect64.si_status = p->p_xstat;
			collect64.si_band = 0;
			proc_unlock(p);

			if (IS_64BIT_PROCESS(p)) {
				user64_siginfo_t sinfo64;

				siginfo_user_to_user64(&collect64, &sinfo64);

				error = copyout((caddr_t)&sinfo64,
					uap->infop,
					sizeof(sinfo64));
			} else {
				user32_siginfo_t sinfo32;

				siginfo_user_to_user32(&collect64, &sinfo32);

				error = copyout((caddr_t)&sinfo32,
					uap->infop,
					sizeof(sinfo32));
			}
			/* information unavailable? */
			if (error)
				goto out;

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
			}

			error = 0;
			goto out;
		}
		/* LIST LOCK IS HELD HERE */
		/* Not a process we are interested in; go on to next child */

		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}

	/* list lock is always held */
	/* No child processes that could possibly satisfy the request? */
	if (nfound == 0) {
		proc_list_unlock();
		return (ECHILD);
	}

	if (uap->options & WNOHANG) {
		proc_list_unlock();
		return (0);
	}

	if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)))
		return (error);

	goto loop;
out:
	proc_list_lock();
	p->p_listflag &= ~P_LIST_WAITING;
	wakeup(&p->p_stat);
	proc_list_unlock();
	return (error);
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparentlocked(proc_t child, proc_t parent, int cansignal, int locked)
{
	proc_t oldparent = PROC_NULL;

	if (child->p_pptr == parent)
		return;

	if (locked == 0)
		proc_list_lock();

	oldparent = child->p_pptr;
#if __PROC_INTERNAL_DEBUG
	if (oldparent == PROC_NULL)
		panic("proc_reparent: process %p does not have a parent\n", child);
#endif

	LIST_REMOVE(child, p_sibling);
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt == 0)
		panic("process children count already 0\n");
#endif
	oldparent->p_childrencnt--;
#if __PROC_INTERNAL_DEBUG1
	if (oldparent->p_childrencnt < 0)
		panic("process children count -ve\n");
#endif
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	parent->p_childrencnt++;
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;

	proc_list_unlock();

	if ((cansignal != 0) && (initproc == parent) && (child->p_stat == SZOMB))
		psignal(initproc, SIGCHLD);
	if (locked == 1)
		proc_list_lock();
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */

void
vfork_exit(proc_t p, int rv)
{
	vfork_exit_internal(p, rv, 0);
}

void
vfork_exit_internal(proc_t p, int rv, int forceexit)
{
	thread_t self = current_thread();
#ifdef FIXME
	struct task *task = p->task;
#endif
	struct uthread *ut;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);


	proc_lock(p);
	if ((p->p_lflag & P_LPEXIT) == P_LPEXIT) {
		/*
		 * This happens when a parent exits or is killed while a vfork
		 * is in progress in other threads, e.g. shutdown code has
		 * already called exit1().
		 */
		proc_unlock(p);
		return;
	}
	p->p_lflag |= (P_LEXIT | P_LPEXIT);
	proc_unlock(p);

	if (forceexit == 0) {
		/*
		 * parent of a vfork child has already called exit() and the
		 * thread that has the vfork in progress terminates. So there is no
		 * separate address space here and it has already been marked for
		 * termination. This was never covered before and could cause problems
		 * if we block here for outside code.
		 */
		/* Notify the perf server */
		(void)sys_perf_notify(self, p->p_pid);
	}

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this on allproc
	 * in a partially cleaned state.
	 */

	proc_list_lock();

	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	/* will not be visible via proc_find */
	p->p_listflag |= P_LIST_EXITED;

	proc_list_unlock();

	proc_lock(p);
	p->p_xstat = rv;
	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
	p->p_sigignore = ~0;
	proc_unlock(p);

	proc_spinlock(p);
	if (thread_call_cancel(p->p_rcall))
		p->p_ractive--;

	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}
	proc_spinunlock(p);

	thread_call_free(p->p_rcall);
	p->p_rcall = NULL;

	ut->uu_siglist = 0;

	vproc_exit(p);
}
1657
0b4e3aa0 1658void
2d21ac55 1659vproc_exit(proc_t p)
0b4e3aa0 1660{
2d21ac55
A
1661 proc_t q;
1662 proc_t pp;
1663
1664 vnode_t tvp;
91447636 1665#ifdef FIXME
0b4e3aa0 1666 struct task *task = p->task;
91447636 1667#endif
2d21ac55
A
1668 struct pgrp * pg;
1669 struct session *sessp;
0b4e3aa0 1670
91447636 1671 /* XXX Zombie allocation may fail, in which case stats get lost */
0b4e3aa0
A
1672 MALLOC_ZONE(p->p_ru, struct rusage *,
1673 sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
1674
2d21ac55
A
1675
1676 proc_refdrain(p);
1677
0b4e3aa0
A
1678 /*
1679 * Close open files and release open-file table.
1680 * This may block!
1681 */
1682 fdfree(p);
1683
2d21ac55
A
1684 sessp = proc_session(p);
1685 if (SESS_LEADER(p, sessp)) {
1686
2d21ac55 1687 if (sessp->s_ttyvp != NULLVP) {
fa4905b1 1688 struct vnode *ttyvp;
2d21ac55 1689 int ttyvid;
91447636 1690 struct vfs_context context;
2d21ac55 1691 struct tty * tp;
fa4905b1 1692
0b4e3aa0
A
1693 /*
1694 * Controlling process.
1695 * Signal foreground pgrp,
1696 * drain controlling terminal
1697 * and revoke access to controlling terminal.
1698 */
b0d623f7 1699 tp = SESSION_TP(sessp);
2d21ac55
A
1700
1701 if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
1702 tty_pgsignal(tp, SIGHUP, 1);
b0d623f7 1703 tty_lock(tp);
2d21ac55 1704 (void) ttywait(tp);
b0d623f7 1705 tty_unlock(tp);
0b4e3aa0
A
1706 /*
1707 * The tty could have been revoked
1708 * if we blocked.
1709 */
2d21ac55
A
1710
1711 session_lock(sessp);
b0d623f7
A
1712 /* reget in case of race */
1713 tp = SESSION_TP(sessp);
2d21ac55
A
1714 ttyvp = sessp->s_ttyvp;
1715 ttyvid = sessp->s_ttyvid;
1716 sessp->s_ttyvp = NULL;
1717 sessp->s_ttyvid = 0;
b0d623f7 1718 sessp->s_ttyp = TTY_NULL;
2d21ac55
A
1719 sessp->s_ttypgrpid = NO_PID;
1720 session_unlock(sessp);
1721
1722 if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
1723 context.vc_thread = proc_thread(p); /* XXX */
1724 context.vc_ucred = kauth_cred_proc_ref(p);
1725 VNOP_REVOKE(ttyvp, REVOKEALL, &context);
1726 vnode_put(ttyvp);
1727 kauth_cred_unref(&context.vc_ucred);
1728 }
1729 } else {
1730 session_lock(sessp);
1731 ttyvp = sessp->s_ttyvp;
1732 sessp->s_ttyvp = NULL;
1733 sessp->s_ttyvid = 0;
b0d623f7 1734 sessp->s_ttyp = TTY_NULL;
2d21ac55
A
1735 sessp->s_ttypgrpid = NO_PID;
1736 session_unlock(sessp);
0b4e3aa0 1737 }
2d21ac55 1738 if (ttyvp)
91447636 1739 vnode_rele(ttyvp);
0b4e3aa0
A
1740 /*
1741 * s_ttyp is not zero'd; we use this to indicate
1742 * that the session once had a controlling terminal.
1743 * (for logging and informational purposes)
1744 */
1745 }
2d21ac55
A
1746
1747 session_lock(sessp);
1748 sessp->s_leader = NULL;
1749 session_unlock(sessp);
0b4e3aa0 1750 }
2d21ac55 1751 session_rele(sessp);
0b4e3aa0 1752
2d21ac55
A
1753 pg = proc_pgrp(p);
1754 fixjobc(p, pg, 0);
1755 pg_rele(pg);
9bccf70c 1756
2d21ac55 1757 p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
0b4e3aa0 1758
2d21ac55
A
1759 proc_list_lock();
1760 proc_childdrainstart(p);
1761 while ((q = p->p_children.lh_first) != NULL) {
1762 q->p_listflag |= P_LIST_DEADPARENT;
1763 if (q->p_stat == SZOMB) {
1764 if (p != q->p_pptr)
1765 panic("parent child linkage broken");
1766 /* check for lookups by zomb sysctl */
1767 while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
1768 msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1769 }
1770 q->p_listflag |= P_LIST_WAITING;
1771 /*
1772 * This is a named reference and it is not granted
1773 * if the reap is already in progress. So we get
1774 * the reference here exclusively and there can be
1775 * no waiters. So there is no need for a wakeup
1776 * after we are done. Also the reap frees the structure
1777 * and the proc struct cannot be used for wakeups either.
1778 * It is safe to use q here as this is a system reap.
1779 */
1780 (void)reap_child_locked(p, q, 1, 1, 0);
1781 } else {
1782 proc_reparentlocked(q, initproc, 0, 1);
1783 /*
1784 * Traced processes are killed
1785 * since their existence means someone is messing up.
1786 */
1787 if (q->p_lflag & P_LTRACED) {
1788 proc_list_unlock();
1789 proc_lock(q);
1790 q->p_lflag &= ~P_LTRACED;
1791 if (q->sigwait_thread) {
6d2010ae
A
1792 thread_t thread = q->sigwait_thread;
1793
2d21ac55
A
1794 proc_unlock(q);
1795 /*
1796 * The sigwait_thread could be stopped at a
1797 * breakpoint. Wake it up to kill.
1798 * Need to do this as it could be a thread which is not
1799 * the first thread in the task. So any attempts to kill
1800 * the process would result in a deadlock on q->sigwait.
1801 */
6d2010ae
A
1802 thread_resume(thread);
1803 clear_wait(thread, THREAD_INTERRUPTED);
1804 threadsignal(thread, SIGKILL, 0);
1805 } else {
2d21ac55 1806 proc_unlock(q);
6d2010ae
A
1807 }
1808
2d21ac55
A
1809 psignal(q, SIGKILL);
1810 proc_list_lock();
0b4e3aa0 1811 }
0b4e3aa0
A
1812 }
1813 }
1814
2d21ac55
A
1815 proc_childdrainend(p);
1816 proc_list_unlock();
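/*
 * The loop above reparents surviving children to initproc (pid 1, launchd
 * on this platform) and kills any that were being traced. From userspace
 * the reparenting shows up as getppid() changing once the original parent
 * is gone. Minimal sketch, illustrative only and not part of this file:
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	if (fork() == 0) {
		printf("before: parent pid %d\n", (int)getppid());
		sleep(2);		/* give the parent time to exit */
		printf("after:  parent pid %d\n", (int)getppid());
		_exit(0);
	}
	exit(0);			/* child is adopted by initproc */
}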
1817
1818 /*
1819 * Release reference to text vnode
1820 */
1821 tvp = p->p_textvp;
1822 p->p_textvp = NULL;
1823 if (tvp != NULLVP) {
1824 vnode_rele(tvp);
1825 }
1826
0b4e3aa0
A
1827 /*
1828 * Save exit status and final rusage info, adding in child rusage
91447636
A
1829 * info and self times. If we were unable to allocate a zombie
1830 * structure, this information is lost.
0b4e3aa0 1831 */
2d21ac55 1832 /* No need for locking here as no one other than this thread can access this */
91447636
A
1833 if (p->p_ru != NULL) {
1834 *p->p_ru = p->p_stats->p_ru;
1835 timerclear(&p->p_ru->ru_utime);
1836 timerclear(&p->p_ru->ru_stime);
0b4e3aa0
A
1837
1838#ifdef FIXME
91447636 1839 if (task) {
0b4e3aa0
A
1840 task_basic_info_data_t tinfo;
1841 task_thread_times_info_data_t ttimesinfo;
1842 int task_info_stuff, task_ttimes_stuff;
1843 struct timeval ut,st;
1844
1845 task_info_stuff = TASK_BASIC_INFO_COUNT;
1846 task_info(task, TASK_BASIC_INFO,
1847 &tinfo, &task_info_stuff);
1848 p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
1849 p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
1850 p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
1851 p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
1852
1853 task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
1854 task_info(task, TASK_THREAD_TIMES_INFO,
1855 &ttimesinfo, &task_ttimes_stuff);
1856
1857 ut.tv_sec = ttimesinfo.user_time.seconds;
1858 ut.tv_usec = ttimesinfo.user_time.microseconds;
1859 st.tv_sec = ttimesinfo.system_time.seconds;
1860 st.tv_usec = ttimesinfo.system_time.microseconds;
1861 timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
91447636
A
1862 timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
1863 }
0b4e3aa0
A
1864#endif /* FIXME */
1865
91447636
A
1866 ruadd(p->p_ru, &p->p_stats->p_cru);
1867 }
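/*
 * The rusage assembled into p->p_ru above is what the parent receives when
 * it reaps this process. Consumer-side sketch using the standard wait4()
 * interface; illustrative only and not part of this file:
 */
#include <stdio.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	if (child == 0)
		_exit(42);

	int status = 0;
	struct rusage ru;
	if (wait4(child, &status, 0, &ru) == child) {
		printf("status %d, user %ld.%06d s, sys %ld.%06d s\n",
		    WEXITSTATUS(status),
		    (long)ru.ru_utime.tv_sec, (int)ru.ru_utime.tv_usec,
		    (long)ru.ru_stime.tv_sec, (int)ru.ru_stime.tv_usec);
	}
	return 0;
}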
0b4e3aa0
A
1868
1869 /*
1870 * Free up profiling buffers.
1871 */
1872 {
1873 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
1874
1875 p1 = p0->pr_next;
1876 p0->pr_next = NULL;
1877 p0->pr_scale = 0;
1878
1879 for (; p1 != NULL; p1 = pn) {
1880 pn = p1->pr_next;
91447636 1881 kfree(p1, sizeof *p1);
0b4e3aa0
A
1882 }
1883 }
1884
6d2010ae
A
1885#if PSYNCH
1886 pth_proc_hashdelete(p);
1887#endif /* PSYNCH */
1888
0b4e3aa0
A
1889 /*
1890 * Other substructures are freed from wait().
1891 */
2d21ac55 1892 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
0b4e3aa0
A
1893 p->p_stats = NULL;
1894
2d21ac55 1895 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
0b4e3aa0
A
1896 p->p_sigacts = NULL;
1897
2d21ac55 1898 proc_limitdrop(p, 1);
0b4e3aa0
A
1899 p->p_limit = NULL;
1900
1901 /*
1902 * Finish up by terminating the task
1903 * and halt this thread (only if a
1904 * member of the task exiting).
1905 */
1906 p->task = TASK_NULL;
1907
1908 /*
1909 * Notify parent that we're gone.
1910 */
2d21ac55
A
1911 pp = proc_parent(p);
1912 if ((p->p_listflag & P_LIST_DEADPARENT) == 0) {
1913 if (pp != initproc) {
1914 proc_lock(pp);
1915 pp->si_pid = p->p_pid;
1916 pp->si_status = p->p_xstat;
1917 pp->si_code = CLD_EXITED;
1918 /*
1919 * p_ucred usage is safe as it is an exiting process
1920 * and reference is dropped in reap
1921 */
6d2010ae 1922 pp->si_uid = kauth_cred_getruid(p->p_ucred);
2d21ac55
A
1923 proc_unlock(pp);
1924 }
1925 /* mark as a zombie */
1927 /* No need to take the proc lock as all refs are drained and
1928 * no one except the parent (when reaping) can look at this.
1929 * The write is to an int and is coherent. Also the parent is
1930 * keyed off of the list lock for reaping.
1931 */
1932 p->p_stat = SZOMB;
0b4e3aa0 1933
2d21ac55 1934 psignal(pp, SIGCHLD);
91447636 1935
2d21ac55
A
1936 /* and now wakeup the parent */
1937 proc_list_lock();
1938 wakeup((caddr_t)pp);
1939 proc_list_unlock();
1940 } else {
1941 proc_list_lock();
2d21ac55
A
1942 /* check for lookups by zomb sysctl */
1943 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
1944 msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1945 }
d1ecb069 1946 p->p_stat = SZOMB;
2d21ac55 1947 p->p_listflag |= P_LIST_WAITING;
d1ecb069 1948
2d21ac55
A
1949 /*
1950 * This is a named reference and it is not granted
1951 * if the reap is already in progress. So we get
1952 * the reference here exclusively and there can be
1953 * no waiters. So there is no need for a wakeup
1954 * after we are done. Also the reap frees the structure
1955 * and the proc struct cannot be used for wakeups either.
1956 * It is safe to use p here as this is a system reap.
1957 */
1958 (void)reap_child_locked(pp, p, 0, 1, 1);
1959 /* list lock dropped by reap_child_locked */
1960 }
1961 proc_rele(pp);
0b4e3aa0 1962}
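/*
 * In the parent-notification path above, si_pid, si_status and si_code
 * (CLD_EXITED) are staged on the parent and SIGCHLD is posted. Those fields
 * surface in a SA_SIGINFO handler; minimal userspace sketch, illustrative
 * only and not part of this file:
 */
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

static void on_chld(int signo, siginfo_t *info, void *ctx)
{
	(void)signo; (void)ctx;
	if (info->si_code == CLD_EXITED) {
		/* info->si_pid / si_status carry the child's pid and exit status;
		   async-signal-safe reporting is omitted for brevity */
	}
}

int main(void)
{
	struct sigaction sa;
	sa.sa_sigaction = on_chld;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGCHLD, &sa, NULL);

	if (fork() == 0)
		_exit(7);

	pause();			/* returns once SIGCHLD is handled */
	int status;
	wait(&status);			/* reap the zombie */
	return 0;
}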
91447636
A
1963
1964
1965/*
1966 * munge_rusage
1967 * LP64 support - long is 64 bits if we are dealing with a 64 bit user
b0d623f7 1968 * process. We munge the kernel version of rusage into the
91447636
A
1969 * 64 bit version.
1970 */
1971__private_extern__ void
b0d623f7 1972munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
91447636
A
1973{
1974 /* timeval changes size, so utime and stime need special handling */
1975 a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
1976 a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
1977 a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
1978 a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
1979 /*
1980 * everything else can be a direct assign, since there is no loss
1981 * of precision implied in going 32->64.
1982 */
1983 a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
1984 a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
1985 a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
1986 a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
1987 a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
1988 a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
1989 a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
1990 a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
1991 a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
1992 a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
1993 a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
1994 a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
1995 a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
1996 a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
1997}
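/*
 * munge_user64_rusage() produces the ABI-specific layout that a 64-bit
 * process sees through interfaces such as getrusage() and wait4(). Minimal
 * consumer-side sketch, illustrative only and not part of this file:
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;
	if (getrusage(RUSAGE_SELF, &ru) == 0) {
		printf("user %ld.%06d s (tv_sec is %zu bytes in this ABI)\n",
		    (long)ru.ru_utime.tv_sec, (int)ru.ru_utime.tv_usec,
		    sizeof(ru.ru_utime.tv_sec));
		printf("max rss %ld\n", ru.ru_maxrss);
	}
	return 0;
}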
b0d623f7
A
1998
1999/* For a 64-bit kernel and 32-bit userspace, munging may be needed */
2000__private_extern__ void
2001munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
2002{
2003 /* timeval changes size, so utime and stime need special handling */
2004 a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
2005 a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
2006 a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
2007 a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
2008 /*
2009 * everything else can be a direct assign. We currently ignore
2010 * the loss of precision.
2011 */
2012 a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
2013 a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
2014 a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
2015 a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
2016 a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
2017 a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
2018 a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
2019 a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
2020 a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
2021 a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
2022 a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
2023 a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
2024 a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
2025 a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
2026}