[apple/xnu.git] / bsd / kern / kern_exit.c (xnu-1228.7.58)
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55 6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55 15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
28/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
67 */
2d21ac55 68/*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
1c79356b 74
75#include <machine/reg.h>
76#include <machine/psl.h>
77
78#include "compat_43.h"
79
80#include <sys/param.h>
81#include <sys/systm.h>
82#include <sys/ioctl.h>
91447636 83#include <sys/proc_internal.h>
2d21ac55 84#include <sys/proc.h>
91447636 85#include <sys/kauth.h>
1c79356b
A
86#include <sys/tty.h>
87#include <sys/time.h>
88#include <sys/resource.h>
89#include <sys/kernel.h>
1c79356b 90#include <sys/wait.h>
91447636
A
91#include <sys/file_internal.h>
92#include <sys/vnode_internal.h>
1c79356b
A
93#include <sys/syslog.h>
94#include <sys/malloc.h>
95#include <sys/resourcevar.h>
96#include <sys/ptrace.h>
97#include <sys/user.h>
55e303ae 98#include <sys/aio_kern.h>
91447636
A
99#include <sys/sysproto.h>
100#include <sys/signalvar.h>
101#include <sys/filedesc.h> /* fdfree */
2d21ac55 102#if SYSV_SHM
91447636 103#include <sys/shm_internal.h> /* shmexit */
2d21ac55 104#endif
91447636 105#include <sys/acct.h> /* acct_process */
e5568f75
A
106
107#include <bsm/audit_kernel.h>
108#include <bsm/audit_kevents.h>
1c79356b
A
109
110#include <mach/mach_types.h>
91447636
A
111
112#include <kern/kern_types.h>
113#include <kern/kalloc.h>
114#include <kern/task.h>
1c79356b 115#include <kern/thread.h>
2d21ac55 116#include <kern/thread_call.h>
9bccf70c 117#include <kern/sched_prim.h>
1c79356b 118#include <kern/assert.h>
2d21ac55
A
119#if CONFIG_DTRACE
120/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
121extern void (*dtrace_fasttrap_exit_ptr)(proc_t);
122extern void (*dtrace_helpers_cleanup)(proc_t);
123extern void dtrace_lazy_dofs_destroy(proc_t);
124
125#include <sys/dtrace_ptss.h>
126#endif
127
128#if CONFIG_MACF
129#include <security/mac.h>
130#include <sys/syscall.h>
9bccf70c 131#endif
1c79356b 132
91447636
A
133#include <mach/mach_types.h>
134#include <mach/task.h>
135#include <mach/thread_act.h>
136#include <mach/mach_traps.h> /* init_process */
137
2d21ac55
A
138#include <sys/sdt.h>
139
1c79356b 140extern char init_task_failure_data[];
2d21ac55
A
141void proc_prepareexit(proc_t p, int rv);
142void vfork_exit(proc_t p, int rv);
143void vproc_exit(proc_t p);
91447636 144__private_extern__ void munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p);
2d21ac55 145static int reap_child_locked(proc_t parent, proc_t child, int deadparent, int locked, int droplock);
91447636
A
146
147/*
148 * Things which should have prototypes in headers, but don't
149 */
91447636 150void *get_bsduthreadarg(thread_t);
2d21ac55 151void proc_exit(proc_t p);
91447636
A
152int wait1continue(int result);
153int waitidcontinue(int result);
154int *get_bsduthreadrval(thread_t);
2d21ac55
A
155kern_return_t sys_perf_notify(thread_t thread, int pid);
156kern_return_t abnormal_exit_notify(mach_exception_data_type_t code,
157 mach_exception_data_type_t subcode);
158int in_shutdown(void);
159void workqueue_exit(struct proc *);
160void delay(int);
161
91447636
A
162/*
163 * NOTE: Source and target may *NOT* overlap!
164 * XXX Should share code with bsd/dev/ppc/unix_signal.c
165 */
166static void
167siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
168{
169 out->si_signo = in->si_signo;
170 out->si_errno = in->si_errno;
171 out->si_code = in->si_code;
172 out->si_pid = in->si_pid;
173 out->si_uid = in->si_uid;
174 out->si_status = in->si_status;
175 out->si_addr = CAST_DOWN(void *,in->si_addr);
176 /* following cast works for sival_int because of padding */
177 out->si_value.sival_ptr = CAST_DOWN(void *,in->si_value.sival_ptr);
178 out->si_band = in->si_band; /* range reduction */
2d21ac55 179 out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */
91447636 180}
1c79356b
A
181
182/*
183 * exit --
184 * Death of process.
185 */
1c79356b 186void
2d21ac55 187exit(proc_t p, struct exit_args *uap, int *retval)
1c79356b 188{
0b4e3aa0 189 exit1(p, W_EXITCODE(uap->rval, 0), retval);
1c79356b 190
9bccf70c 191 /* drop funnel before we return */
1c79356b
A
192 thread_exception_return();
193 /* NOTREACHED */
194 while (TRUE)
9bccf70c 195 thread_block(THREAD_CONTINUE_NULL);
1c79356b
A
196 /* NOTREACHED */
197}
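/*
 * Illustrative sketch (not part of the original file): exit1() stores
 * W_EXITCODE(uap->rval, 0) in p_xstat, and wait4()/waitid() later pull it
 * apart with WEXITSTATUS()/WTERMSIG()/W_STOPCODE().  Assuming the classic
 * BSD wait-status layout from <sys/wait.h>, hypothetical equivalents of
 * those macros would look roughly like this:
 */
#if 0	/* example only */
#define	EX_W_EXITCODE(ret, sig)	(((ret) << 8) | (sig))	/* low 7 bits carry the signal */
#define	EX_WTERMSIG(x)		((x) & 0177)		/* 0 means a normal exit */
#define	EX_WEXITSTATUS(x)	(((x) >> 8) & 0xff)	/* high byte is the exit code */
#define	EX_W_STOPCODE(sig)	(((sig) << 8) | 0177)	/* 0177 marks a stopped child */
#endif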
198
199/*
200 * Exit: deallocate address space and other resources, change proc state
201 * to zombie, and unlink proc from allproc and parent's lists. Save exit
202 * status and rusage for wait(). Check for child processes and orphan them.
203 */
0b4e3aa0 204int
2d21ac55 205exit1(proc_t p, int rv, int *retval)
1c79356b 206{
91447636 207 thread_t self = current_thread();
1c79356b 208 struct task *task = p->task;
1c79356b
A
209 struct uthread *ut;
210
211 /*
212 * If a thread in this task has already
213 * called exit(), then halt any others
214 * right here.
215 */
0b4e3aa0 216
55e303ae 217 ut = get_bsdthread_info(self);
91447636
A
218 if (ut->uu_flag & UT_VFORK) {
219 vfork_exit(p, rv);
2d21ac55 220 vfork_return(p , retval, p->p_pid);
0b4e3aa0
A
221 unix_syscall_return(0);
222 /* NOT REACHED */
223 }
2d21ac55
A
224
225 /*
226 * The parameter list of audit_syscall_exit() was augmented to
227 * take the Darwin syscall number as the first parameter,
228 * which is currently required by mac_audit_postselect().
229 */
230
231 AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
232
233 DTRACE_PROC1(exit, int, CLD_EXITED);
234
235 proc_lock(p);
1c79356b
A
236 while (p->exit_thread != self) {
237 if (sig_try_locked(p) <= 0) {
55e303ae 238 if (get_threadtask(self) != task) {
2d21ac55 239 proc_unlock(p);
0b4e3aa0 240 return(0);
1c79356b 241 }
2d21ac55 242 proc_unlock(p);
55e303ae 243 thread_terminate(self);
1c79356b
A
244 thread_exception_return();
245 /* NOTREACHED */
246 }
247 sig_lock_to_exit(p);
248 }
4a3eedf9 249 if (p == initproc) {
2d21ac55 250 proc_unlock(p);
1c79356b
A
251 printf("pid 1 exited (signal %d, exit %d)",
252 WTERMSIG(rv), WEXITSTATUS(rv));
2d21ac55
A
253 panic("%s died\nState at Last Exception:\n\n%s",
254 (p->p_comm[0] != '\0' ?
255 p->p_comm :
256 "launchd"),
1c79356b
A
257 init_task_failure_data);
258 }
259
2d21ac55 260 p->p_lflag |= P_LEXIT;
1c79356b
A
261 p->p_xstat = rv;
262
2d21ac55
A
263 proc_unlock(p);
264
265 proc_prepareexit(p, rv);
266
1c79356b
A
267 /* task terminate will call proc_terminate and that cleans it up */
268 task_terminate_internal(task);
269
0b4e3aa0 270 return(0);
1c79356b
A
271}
272
273void
2d21ac55 274proc_prepareexit(proc_t p, int rv)
1c79356b 275{
2d21ac55 276 mach_exception_data_type_t code, subcode;
1c79356b 277 struct uthread *ut;
91447636 278 thread_t self = current_thread();
2d21ac55 279 ut = get_bsdthread_info(self);
55e303ae 280
2d21ac55
A
281 /* If a core should be generated, notify crash reporter */
282 if (!in_shutdown() && hassigprop(WTERMSIG(rv), SA_CORE)) {
283 /*
284 * Workaround for processes checking up on PT_DENY_ATTACH:
285 * should be backed out post-Leopard (details in 5431025).
286 */
287 if ((SIGSEGV == WTERMSIG(rv)) &&
288 (p->p_pptr->p_lflag & P_LNOATTACH)) {
289 goto skipcheck;
290 }
291
292 /*
293 * Crash Reporter looks for the signal value, original exception
294 * type, and low 20 bits of the original code in code[0]
295 * (8, 4, and 20 bits respectively). code[1] is unmodified.
296 */
297 code = ((WTERMSIG(rv) & 0xff) << 24) |
298 ((ut->uu_exception & 0x0f) << 20) |
299 ((int)ut->uu_code & 0xfffff);
300 subcode = ut->uu_subcode;
301 (void) abnormal_exit_notify(code, subcode);
302 }
303
304skipcheck:
91447636 305 /* Notify the perf server */
2d21ac55 306 (void)sys_perf_notify(self, p->p_pid);
1c79356b 307
1c79356b
A
308 /*
309 * Remove proc from allproc queue and from pidhash chain.
310 * Need to do this before we do anything that can block.
311 * Not doing so causes things like mount() to find this on allproc
312 * in a partially cleaned state.
313 */
2d21ac55
A
314
315 proc_list_lock();
316
1c79356b 317 LIST_REMOVE(p, p_list);
55e303ae 318 LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2d21ac55
A
319 /* will not be visible via proc_find */
320 p->p_listflag |= P_LIST_EXITED;
321
322 proc_list_unlock();
323
1c79356b
A
324
325#ifdef PGINPROF
326 vmsizmon();
327#endif
328 /*
329 * If parent is waiting for us to exit or exec,
2d21ac55 330 * P_LPPWAIT is set; we will wakeup the parent below.
1c79356b 331 */
2d21ac55
A
332 proc_lock(p);
333 p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
91447636 334 p->p_sigignore = ~(sigcantmask);
9bccf70c 335 ut->uu_siglist = 0;
2d21ac55 336 proc_unlock(p);
1c79356b
A
337}
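/*
 * Illustrative sketch (not part of the original file): proc_prepareexit()
 * packs code[0] for Crash Reporter as 8 bits of signal, 4 bits of Mach
 * exception type and the low 20 bits of the original exception code.  A
 * hypothetical decoder would simply undo that packing:
 */
#if 0	/* example only */
static void
crash_code_unpack(mach_exception_data_type_t code,
    int *signo, int *exc_type, int *exc_code)
{
	*signo    = (int)((code >> 24) & 0xff);		/* WTERMSIG(rv) */
	*exc_type = (int)((code >> 20) & 0x0f);		/* ut->uu_exception */
	*exc_code = (int)(code & 0xfffff);		/* low bits of ut->uu_code */
}
#endif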
338
339void
2d21ac55 340proc_exit(proc_t p)
1c79356b 341{
2d21ac55
A
342 proc_t q;
343 proc_t pp;
1c79356b 344 struct task *task = p->task;
2d21ac55
A
345 boolean_t fstate;
346 vnode_t tvp = NULLVP;
347 struct pgrp * pg;
348 struct session *sessp;
349 struct uthread * uth;
1c79356b
A
350
351 /* This can happen if thread_terminate is called on the single
352 * thread of the process.
353 */
354
2d21ac55
A
355 uth = (struct uthread *)get_bsdthread_info(current_thread());
356
357 proc_lock(p);
358 if( !(p->p_lflag & P_LEXIT)) {
359 p->p_lflag |= P_LEXIT;
360 proc_unlock(p);
361 proc_prepareexit(p, 0);
362 proc_lock(p);
1c79356b
A
363 }
364
91447636 365 p->p_lflag |= P_LPEXIT;
2d21ac55
A
366 proc_unlock(p);
367
368#if CONFIG_DTRACE
369 /*
370 * Free any outstanding lazy dof entries. It is imperative we
371 * always call dtrace_lazy_dofs_destroy, rather than null check
372 * and call if !NULL. If we NULL test, during lazy dof faulting
373 * we can race with the faulting code and proceed from here to
374 * beyond the helpers cleanup. The lazy dof faulting will then
375 * install new helpers which will never be cleaned up, and leak.
376 */
377 dtrace_lazy_dofs_destroy(p);
378
379 /*
380 * Clean up any DTrace helper actions or probes for the process.
381 */
382 if (p->p_dtrace_helpers != NULL) {
383 (*dtrace_helpers_cleanup)(p);
384 }
385
386 /*
387 * Clean up any DTrace probes associated with this process.
388 */
389 /*
390 * APPLE NOTE: We release ptss pages/entries in dtrace_fasttrap_exit_ptr(),
391 * call this after dtrace_helpers_cleanup()
392 */
393 proc_lock(p);
394 if (p->p_dtrace_probes && dtrace_fasttrap_exit_ptr) {
395 (*dtrace_fasttrap_exit_ptr)(p);
396 }
397 proc_unlock(p);
398#endif
399
91447636 400 /* XXX Zombie allocation may fail, in which case stats get lost */
1c79356b
A
401 MALLOC_ZONE(p->p_ru, struct rusage *,
402 sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
403
55e303ae
A
404 /*
405 * need to cancel async IO requests that can be cancelled and wait for those
406 * already active. MAY BLOCK!
407 */
ff6e181a 408
2d21ac55
A
409 proc_refdrain(p);
410
411 workqueue_exit(p);
ff6e181a 412
55e303ae
A
413 _aio_exit( p );
414
1c79356b
A
415 /*
416 * Close open files and release open-file table.
417 * This may block!
418 */
419 fdfree(p);
420
2d21ac55 421#if SYSV_SHM
1c79356b
A
422 /* Close ref SYSV Shared memory*/
423 if (p->vm_shm)
424 shmexit(p);
2d21ac55
A
425#endif
426#if SYSV_SEM
9bccf70c
A
427 /* Release SYSV semaphores */
428 semexit(p);
2d21ac55 429#endif
1c79356b 430
2d21ac55
A
431 sessp = proc_session(p);
432 if (SESS_LEADER(p, sessp)) {
433
434 /* Protected by funnel for tty accesses */
435 fstate = thread_funnel_set(kernel_flock, TRUE);
1c79356b 436
2d21ac55 437 if (sessp->s_ttyvp != NULLVP) {
fa4905b1 438 struct vnode *ttyvp;
2d21ac55 439 int ttyvid;
91447636 440 struct vfs_context context;
2d21ac55
A
441 struct tty * tp;
442
fa4905b1 443
1c79356b
A
444 /*
445 * Controlling process.
446 * Signal foreground pgrp,
447 * drain controlling terminal
448 * and revoke access to controlling terminal.
449 */
2d21ac55
A
450 tp = sessp->s_ttyp;
451
452 if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
453 tty_pgsignal(tp, SIGHUP, 1);
454 (void) ttywait(tp);
1c79356b
A
455 /*
456 * The tty could have been revoked
457 * if we blocked.
458 */
2d21ac55
A
459
460 session_lock(sessp);
461 ttyvp = sessp->s_ttyvp;
462 ttyvid = sessp->s_ttyvid;
463 sessp->s_ttyvp = NULL;
464 sessp->s_ttyvid = 0;
465 sessp->s_ttyp = NULL;
466 sessp->s_ttypgrpid = NO_PID;
467 session_unlock(sessp);
468
469 if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
470 context.vc_thread = proc_thread(p); /* XXX */
471 context.vc_ucred = kauth_cred_proc_ref(p);
472 VNOP_REVOKE(ttyvp, REVOKEALL, &context);
473 vnode_put(ttyvp);
474 kauth_cred_unref(&context.vc_ucred);
475 }
476 } else {
477 session_lock(sessp);
478 ttyvp = sessp->s_ttyvp;
479 sessp->s_ttyvp = NULL;
480 sessp->s_ttyvid = 0;
481 sessp->s_ttyp = NULL;
482 sessp->s_ttypgrpid = NO_PID;
483 session_unlock(sessp);
1c79356b 484 }
2d21ac55 485 if (ttyvp)
91447636 486 vnode_rele(ttyvp);
1c79356b
A
487 /*
488 * s_ttyp is not zero'd; we use this to indicate
489 * that the session once had a controlling terminal.
490 * (for logging and informational purposes)
491 */
492 }
2d21ac55
A
493
494 (void) thread_funnel_set(kernel_flock, fstate);
495 session_lock(sessp);
496 sessp->s_leader = NULL;
497 session_unlock(sessp);
1c79356b 498 }
2d21ac55
A
499 session_rele(sessp);
500
501 pg = proc_pgrp(p);
502 fixjobc(p, pg, 0);
503 pg_rele(pg);
1c79356b 504
1c79356b 505 p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
9bccf70c
A
506 (void)acct_process(p);
507
2d21ac55
A
508 proc_list_lock();
509 /* wait till parentrefs are dropped and grant no more */
510 proc_childdrainstart(p);
511 while ((q = p->p_children.lh_first) != NULL) {
512 q->p_listflag |= P_LIST_DEADPARENT;
513 if (q->p_stat == SZOMB) {
514 if (p != q->p_pptr)
515 panic("parent child linkage broken");
516 /* check for sysctl zomb lookup */
517 while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
518 msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
519 }
520 q->p_listflag |= P_LIST_WAITING;
521 /*
522 * This is a named reference and it is not granted
523 * if the reap is already in progress. So we get
524 * the reference here exclusively and there can be
525 * no waiters. So there is no need for a wakeup
526 * after we are done. Also the reap frees the structure
527 * and the proc struct cannot be used for wakeups either.
528 * It is safe to use q here as this is a system reap.
529 */
530 (void)reap_child_locked(p, q, 1, 1, 0);
531 } else {
532 proc_reparentlocked(q, initproc, 0, 1);
533 /*
534 * Traced processes are killed
535 * since their existence means someone is messing up.
536 */
537 if (q->p_lflag & P_LTRACED) {
538 proc_list_unlock();
539 proc_lock(q);
540 q->p_lflag &= ~P_LTRACED;
541 if (q->sigwait_thread) {
542 proc_unlock(q);
543 /*
544 * The sigwait_thread could be stopped at a
545 * breakpoint. Wake it up to kill.
546 * Need to do this as it could be a thread which is not
547 * the first thread in the task. So any attempts to kill
548 * the process would result in a deadlock on q->sigwait.
549 */
550 thread_resume((thread_t)q->sigwait_thread);
551 clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
552 threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
553 } else
554 proc_unlock(q);
555 psignal(q, SIGKILL);
556 proc_list_lock();
1c79356b 557 }
1c79356b
A
558 }
559 }
560
2d21ac55
A
561 proc_childdrainend(p);
562 proc_list_unlock();
563
564 /*
565 * Release reference to text vnode
566 */
567 tvp = p->p_textvp;
568 p->p_textvp = NULL;
569 if (tvp != NULLVP) {
570 vnode_rele(tvp);
571 }
572
1c79356b
A
573 /*
574 * Save exit status and final rusage info, adding in child rusage
91447636
A
575 * info and self times. If we were unable to allocate a zombie
576 * structure, this information is lost.
1c79356b 577 */
2d21ac55 578 /* No need for locking here as no one other than this thread can access this */
91447636
A
579 if (p->p_ru != NULL) {
580 *p->p_ru = p->p_stats->p_ru;
1c79356b 581
91447636
A
582 timerclear(&p->p_ru->ru_utime);
583 timerclear(&p->p_ru->ru_stime);
1c79356b 584
91447636 585 if (task) {
2d21ac55 586 task_basic_info_32_data_t tinfo;
1c79356b 587 task_thread_times_info_data_t ttimesinfo;
2d21ac55
A
588 task_events_info_data_t teventsinfo;
589 mach_msg_type_number_t task_info_stuff, task_ttimes_stuff;
590 mach_msg_type_number_t task_events_stuff;
1c79356b
A
591 struct timeval ut,st;
592
2d21ac55
A
593 task_info_stuff = TASK_BASIC_INFO_32_COUNT;
594 task_info(task, TASK_BASIC2_INFO_32,
91447636 595 (task_info_t)&tinfo, &task_info_stuff);
1c79356b
A
596 p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
597 p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
598 p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
599 p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
600
2d21ac55
A
601 p->p_ru->ru_maxrss = tinfo.resident_size;
602
1c79356b
A
603 task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
604 task_info(task, TASK_THREAD_TIMES_INFO,
91447636 605 (task_info_t)&ttimesinfo, &task_ttimes_stuff);
1c79356b
A
606
607 ut.tv_sec = ttimesinfo.user_time.seconds;
608 ut.tv_usec = ttimesinfo.user_time.microseconds;
609 st.tv_sec = ttimesinfo.system_time.seconds;
610 st.tv_usec = ttimesinfo.system_time.microseconds;
611 timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
612 timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
2d21ac55
A
613
614 task_events_stuff = TASK_EVENTS_INFO_COUNT;
615 task_info(task, TASK_EVENTS_INFO,
616 (task_info_t)&teventsinfo, &task_events_stuff);
617
618 p->p_ru->ru_minflt = (teventsinfo.faults -
619 teventsinfo.pageins);
620 p->p_ru->ru_majflt = teventsinfo.pageins;
621 p->p_ru->ru_nivcsw = (teventsinfo.csw -
622 p->p_ru->ru_nvcsw);
623 if (p->p_ru->ru_nivcsw < 0)
624 p->p_ru->ru_nivcsw = 0;
91447636 625 }
1c79356b 626
91447636
A
627 ruadd(p->p_ru, &p->p_stats->p_cru);
628 }
1c79356b
A
629
630 /*
631 * Free up profiling buffers.
632 */
633 {
634 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
635
636 p1 = p0->pr_next;
637 p0->pr_next = NULL;
638 p0->pr_scale = 0;
639
640 for (; p1 != NULL; p1 = pn) {
641 pn = p1->pr_next;
91447636 642 kfree(p1, sizeof *p1);
1c79356b
A
643 }
644 }
645
2d21ac55
A
646 proc_spinlock(p);
647 if (thread_call_cancel(p->p_rcall))
648 p->p_ractive--;
649
650 while (p->p_ractive > 0) {
651 proc_spinunlock(p);
652
653 delay(1);
654
655 proc_spinlock(p);
656 }
657 proc_spinunlock(p);
658
659 thread_call_free(p->p_rcall);
660 p->p_rcall = NULL;
661
1c79356b
A
662 /*
663 * Other substructures are freed from wait().
664 */
2d21ac55 665 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
1c79356b
A
666 p->p_stats = NULL;
667
2d21ac55 668 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
1c79356b
A
669 p->p_sigacts = NULL;
670
2d21ac55 671 proc_limitdrop(p, 1);
1c79356b
A
672 p->p_limit = NULL;
673
2d21ac55 674
1c79356b
A
675 /*
676 * Finish up by terminating the task
677 * and halt this thread (only if a
678 * member of the task exiting).
679 */
680 p->task = TASK_NULL;
1c79356b
A
681 set_bsdtask_info(task, NULL);
682
2d21ac55 683 proc_knote(p, NOTE_EXIT);
55e303ae 684
2d21ac55
A
685 /* mark the thread as the one that is doing proc_exit
686 * no need to hold proc lock in uthread_free
687 */
688 uth->uu_flag |= UT_PROCEXIT;
1c79356b
A
689 /*
690 * Notify parent that we're gone.
691 */
2d21ac55
A
692 pp = proc_parent(p);
693 if (pp->p_flag & P_NOCLDWAIT) {
9bccf70c 694
2d21ac55
A
695#if 3839178
696 /*
697 * If the parent is ignoring SIGCHLD, then POSIX requires
698 * us to not add the resource usage to the parent process -
699 * we are only going to hand it off to init to get reaped.
700 * We should contest the standard in this case on the basis
701 * of RLIMIT_CPU.
702 */
703#else /* !3839178 */
55e303ae
A
704 /*
705 * Add child resource usage to parent before giving
91447636
A
706 * zombie to init. If we were unable to allocate a
707 * zombie structure, this information is lost.
55e303ae 708 */
2d21ac55
A
709 if (p->p_ru != NULL) {
710 proc_lock(pp);
711 ruadd(&pp->p_stats->p_cru, p->p_ru);
712 proc_unlock(pp);
713 }
714#endif /* !3839178 */
55e303ae 715
2d21ac55
A
716 /* kernel can reap this one, no need to move it to launchd */
717 proc_list_lock();
718 p->p_listflag |= P_LIST_DEADPARENT;
719 proc_list_unlock();
9bccf70c 720 }
2d21ac55
A
721 if ((p->p_listflag & P_LIST_DEADPARENT) == 0) {
722 if (pp != initproc) {
723 proc_lock(pp);
724 pp->si_pid = p->p_pid;
725 pp->si_status = p->p_xstat;
726 pp->si_code = CLD_EXITED;
727 /*
728 * p_ucred usage is safe as it is an exiting process
729 * and reference is dropped in reap
730 */
731 pp->si_uid = p->p_ucred->cr_ruid;
732 proc_unlock(pp);
733 }
734 /* mark as a zombie */
735 /* No need to take proc lock as all refs are drained and
736 * no one except parent (reaping ) can look at this.
737 * The write is to an int and is coherent. Also parent is
738 * keyed off of list lock for reaping
739 */
740 p->p_stat = SZOMB;
741 /*
742 * The current process can be reaped so, no one
743 * can depend on this
744 */
1c79356b 745
2d21ac55
A
746 psignal(pp, SIGCHLD);
747
748 /* and now wakeup the parent */
749 proc_list_lock();
750 wakeup((caddr_t)pp);
751 proc_list_unlock();
752 } else {
753 /* should be fine as parent proc would be initproc */
754 /* mark as a zombie */
755 /* No need to take proc lock as all refs are drained and
756 * no one except parent (reaping ) can look at this.
757 * The write is to an int and is coherent. Also parent is
758 * keyed off of list lock for reaping
759 */
760 proc_list_lock();
761 p->p_stat = SZOMB;
762 /* check for sysctl zomb lookup */
763 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
764 msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
765 }
766 /* safe to use p as this is a system reap */
767 p->p_listflag |= P_LIST_WAITING;
768 /*
769 * This is a named reference and it is not granted
770 * if the reap is already in progress. So we get
771 * the reference here exclusively and there can be
772 * no waiters. So there is no need for a wakeup
773 * after we are done. Also the reap frees the structure
774 * and the proc struct cannot be used for wakeups either.
775 * It is safe to use p here as this is a system reap.
776 */
777 (void)reap_child_locked(pp, p, 1, 1, 1);
778 /* list lock dropped by reap_child_locked */
779 }
91447636 780
2d21ac55 781 proc_rele(pp);
1c79356b 782
1c79356b
A
783}
784
785
91447636 786/*
2d21ac55 787 * reap_child_locked
91447636
A
788 *
789 * Description: Given a process from which all status information needed
790 * has already been extracted, if the process is a ptrace
791 * attach process, detach it and give it back to its real
792 * parent, else recover all resources remaining associated
793 * with it.
794 *
2d21ac55
A
795 * Parameters: proc_t parent Parent of process being reaped
796 * proc_t child Process to reap
91447636
A
797 *
798 * Returns: 0 Process was not reaped because it
799 * came from an attach
800 * 1 Process was reaped
801 */
802static int
2d21ac55 803reap_child_locked(proc_t parent, proc_t child, int deadparent, int locked, int droplock)
1c79356b 804{
2d21ac55 805 proc_t trace_parent; /* Traced parent process, if tracing */
1c79356b 806
91447636
A
807 /*
808 * If we got the child via a ptrace 'attach',
809 * we need to give it back to the old parent.
810 */
2d21ac55
A
811 if (locked == 1)
812 proc_list_unlock();
813 if (child->p_oppid && (trace_parent = proc_find(child->p_oppid))) {
814 proc_lock(child);
91447636 815 child->p_oppid = 0;
2d21ac55 816 proc_unlock(child);
91447636 817 if (trace_parent != initproc) {
2d21ac55
A
818 /*
819 * proc internal fields and p_ucred usage are safe
820 * here as the child is dead and is not reaped or
821 * reparented yet
822 */
823 proc_lock(trace_parent);
91447636
A
824 trace_parent->si_pid = child->p_pid;
825 trace_parent->si_status = child->p_xstat;
826 trace_parent->si_code = CLD_CONTINUED;
827 trace_parent->si_uid = child->p_ucred->cr_ruid;
2d21ac55 828 proc_unlock(trace_parent);
91447636 829 }
2d21ac55 830 proc_reparentlocked(child, trace_parent, 1, 0);
91447636 831 psignal(trace_parent, SIGCHLD);
2d21ac55 832 proc_list_lock();
91447636 833 wakeup((caddr_t)trace_parent);
2d21ac55
A
834 child->p_listflag &= ~P_LIST_WAITING;
835 wakeup(&child->p_stat);
836 proc_list_unlock();
837 proc_rele(trace_parent);
838 if ((locked == 1) && (droplock == 0))
839 proc_list_lock();
91447636
A
840 return (0);
841 }
2d21ac55
A
842
843 proc_knote(child, NOTE_REAP);
844
91447636
A
845 child->p_xstat = 0;
846 if (child->p_ru) {
2d21ac55
A
847 proc_lock(parent);
848#if 3839178
849 /*
850 * If the parent is ignoring SIGCHLD, then POSIX requires
851 * us to not add the resource usage to the parent process -
852 * we are only going to hand it off to init to get reaped.
853 * We should contest the standard in this case on the basis
854 * of RLIMIT_CPU.
855 */
856 if (!(parent->p_flag & P_NOCLDWAIT))
857#endif /* 3839178 */
858 ruadd(&parent->p_stats->p_cru, child->p_ru);
859 proc_unlock(parent);
91447636
A
860 FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
861 child->p_ru = NULL;
862 } else {
863 printf("Warning : lost p_ru for %s\n", child->p_comm);
864 }
1c79356b 865
91447636
A
866 /*
867 * Decrement the count of procs running with this uid.
2d21ac55 868 * p_ucred usage is safe here as it is an exited process
 869 * and the reference is dropped after these calls down below
870 * (locking protection is provided by list lock held in chgproccnt)
91447636
A
871 */
872 (void)chgproccnt(child->p_ucred->cr_ruid, -1);
1c79356b 873
2d21ac55
A
874#if CONFIG_LCTX
875 ALLLCTX_LOCK;
876 leavelctx(child);
877 ALLLCTX_UNLOCK;
878#endif
879
91447636
A
880 /*
881 * Free up credentials.
882 */
0c530ab8
A
883 if (IS_VALID_CRED(child->p_ucred)) {
884 kauth_cred_unref(&child->p_ucred);
885 }
1c79356b 886
2d21ac55
A
887 /* XXXX Note NOT SAFE TO USE p_ucred from this point onwards */
888
91447636
A
889 /*
890 * Finally finished with old proc entry.
891 * Unlink it from its process group and free it.
892 */
893 leavepgrp(child);
2d21ac55
A
894
895 proc_list_lock();
91447636 896 LIST_REMOVE(child, p_list); /* off zombproc */
2d21ac55 897 parent->p_childrencnt--;
91447636 898 LIST_REMOVE(child, p_sibling);
2d21ac55
A
899 /* If there are no more children wakeup parent */
900 if ((deadparent != 0) && (LIST_EMPTY(&parent->p_children)))
901 wakeup((caddr_t)parent); /* with list lock held */
902 child->p_listflag &= ~P_LIST_WAITING;
b36670ce 903 wakeup(&child->p_stat);
1c79356b 904
2d21ac55
A
905 /* Take it out of process hash */
906 LIST_REMOVE(child, p_hash);
907 child->p_listflag &= ~P_LIST_INHASH;
908 proc_checkdeadrefs(child);
909 nprocs--;
910
911 proc_list_unlock();
912
91447636
A
913 lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
914 lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
2d21ac55
A
915#if CONFIG_DTRACE
916 lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp);
917#endif
918 lck_spin_destroy(&child->p_slock, proc_lck_grp);
919 workqueue_destroy_lock(child);
920
91447636 921 FREE_ZONE(child, sizeof *child, M_PROC);
2d21ac55
A
922 if ((locked == 1) && (droplock == 0))
923 proc_list_lock();
924
91447636 925 return (1);
1c79356b
A
926}
927
1c79356b
A
928
929int
91447636 930wait1continue(int result)
1c79356b 931{
7b1edb79 932 void *vt;
91447636 933 thread_t thread;
7b1edb79 934 int *retval;
2d21ac55 935 proc_t p;
1c79356b 936
7b1edb79
A
937 if (result)
938 return(result);
1c79356b 939
7b1edb79 940 p = current_proc();
91447636
A
941 thread = current_thread();
942 vt = get_bsduthreadarg(thread);
943 retval = get_bsduthreadrval(thread);
2d21ac55 944 return(wait4(p, (struct wait4_args *)vt, retval));
1c79356b
A
945}
946
947int
2d21ac55 948wait4(proc_t q, struct wait4_args *uap, register_t *retval)
1c79356b 949{
2d21ac55
A
950 __pthread_testcancel(1);
951 return(wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval));
952}
953
954int
955wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, register_t *retval)
956{
957 int nfound;
958 proc_t p;
1c79356b
A
959 int status, error;
960
1c79356b 961 if (uap->pid == 0)
2d21ac55 962 uap->pid = -q->p_pgrpid;
1c79356b
A
963
964loop:
2d21ac55
A
965 proc_list_lock();
966loop1:
1c79356b
A
967 nfound = 0;
968 for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
969 if (uap->pid != WAIT_ANY &&
970 p->p_pid != uap->pid &&
2d21ac55 971 p->p_pgrpid != -(uap->pid))
1c79356b 972 continue;
2d21ac55 973
1c79356b 974 nfound++;
91447636
A
975
976 /* XXX This is racy because we don't get the lock!!!! */
977
2d21ac55
A
978 if (p->p_listflag & P_LIST_WAITING) {
979 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
980 goto loop1;
7b1edb79 981 }
2d21ac55
A
982 p->p_listflag |= P_LIST_WAITING; /* only allow single thread to wait() */
983
7b1edb79 984
1c79356b 985 if (p->p_stat == SZOMB) {
2d21ac55
A
986 proc_list_unlock();
987#if CONFIG_MACF
988 if ((error = mac_proc_check_wait(q, p)) != 0)
989 goto out;
990#endif
1c79356b 991 retval[0] = p->p_pid;
1c79356b 992 if (uap->status) {
2d21ac55
A
993 /* Legacy apps expect only 8 bits of status */
994 status = 0xffff & p->p_xstat; /* convert to int */
91447636
A
995 error = copyout((caddr_t)&status,
996 uap->status,
997 sizeof(status));
2d21ac55
A
998 if (error)
999 goto out;
1c79356b 1000 }
91447636
A
1001 if (uap->rusage) {
1002 if (p->p_ru == NULL) {
1003 error = ENOMEM;
1004 } else {
1005 if (IS_64BIT_PROCESS(q)) {
1006 struct user_rusage my_rusage;
1007 munge_rusage(p->p_ru, &my_rusage);
1008 error = copyout((caddr_t)&my_rusage,
1009 uap->rusage,
1010 sizeof (my_rusage));
1011 }
1012 else {
1013 error = copyout((caddr_t)p->p_ru,
1014 uap->rusage,
1015 sizeof (struct rusage));
1016 }
9bccf70c 1017 }
91447636 1018 /* information unavailable? */
2d21ac55
A
1019 if (error)
1020 goto out;
1c79356b
A
1021 }
1022
91447636 1023 /* Clean up */
2d21ac55
A
1024 if (!reap_child_locked(q, p, 0, 0, 0)) {
1025 proc_list_lock();
1026 p->p_listflag &= ~P_LIST_WAITING;
b36670ce 1027 wakeup(&p->p_stat);
2d21ac55 1028 proc_list_unlock();
b36670ce 1029 }
91447636 1030
1c79356b
A
1031 return (0);
1032 }
2d21ac55
A
1033 if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
1034 (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
1035 proc_list_unlock();
1036#if CONFIG_MACF
1037 if ((error = mac_proc_check_wait(q, p)) != 0)
1038 goto out;
1039#endif
1040 proc_lock(p);
1041 p->p_lflag |= P_LWAITED;
1042 proc_unlock(p);
1c79356b 1043 retval[0] = p->p_pid;
1c79356b
A
1044 if (uap->status) {
1045 status = W_STOPCODE(p->p_xstat);
1046 error = copyout((caddr_t)&status,
91447636 1047 uap->status,
1c79356b
A
1048 sizeof(status));
1049 } else
1050 error = 0;
2d21ac55
A
1051 goto out;
1052 }
1053 /*
1054 * If we are waiting for continued processes, and this
1055 * process was continued
1056 */
1057 if ((uap->options & WCONTINUED) &&
1058 (p->p_flag & P_CONTINUED)) {
1059 proc_list_unlock();
1060#if CONFIG_MACF
1061 if ((error = mac_proc_check_wait(q, p)) != 0)
1062 goto out;
1063#endif
1064
1065 /* Prevent other processes from waiting for this event */
1066 OSBitAndAtomic(~((uint32_t)P_CONTINUED), (UInt32 *)&p->p_flag);
1067 retval[0] = p->p_pid;
1068 if (uap->status) {
1069 status = W_STOPCODE(SIGCONT);
1070 error = copyout((caddr_t)&status,
1071 uap->status,
1072 sizeof(status));
1073 } else
1074 error = 0;
1075 goto out;
1c79356b 1076 }
2d21ac55 1077 p->p_listflag &= ~P_LIST_WAITING;
7b1edb79 1078 wakeup(&p->p_stat);
1c79356b 1079 }
2d21ac55
A
1080 /* list lock is held when we get here any which way */
1081 if (nfound == 0) {
1082 proc_list_unlock();
1c79356b 1083 return (ECHILD);
2d21ac55 1084 }
7b1edb79 1085
1c79356b
A
1086 if (uap->options & WNOHANG) {
1087 retval[0] = 0;
2d21ac55 1088 proc_list_unlock();
1c79356b
A
1089 return (0);
1090 }
1091
2d21ac55 1092 if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue)))
91447636
A
1093 return (error);
1094
1095 goto loop;
2d21ac55
A
1096out:
1097 proc_list_lock();
1098 p->p_listflag &= ~P_LIST_WAITING;
1099 wakeup(&p->p_stat);
1100 proc_list_unlock();
1101 return (error);
91447636
A
1102}
1103
1104
1105int
1106waitidcontinue(int result)
1107{
1108 void *vt;
1109 thread_t thread;
1110 int *retval;
91447636
A
1111
1112 if (result)
1113 return(result);
1114
91447636
A
1115 thread = current_thread();
1116 vt = get_bsduthreadarg(thread);
1117 retval = get_bsduthreadrval(thread);
2d21ac55 1118 return(waitid(current_proc(), (struct waitid_args *)vt, retval));
91447636
A
1119}
1120
1121/*
1122 * Description: Suspend the calling thread until one child of the process
1123 * containing the calling thread changes state.
1124 *
1125 * Parameters: uap->idtype one of P_PID, P_PGID, P_ALL
1126 * uap->id pid_t or gid_t or ignored
1127 * uap->infop Address of siginfo_t struct in
1128 * user space into which to return status
1129 * uap->options flag values
1130 *
1131 * Returns: 0 Success
1132 * !0 Error returning status to user space
1133 */
1134int
2d21ac55
A
1135waitid(proc_t q, struct waitid_args *uap, register_t *retval)
1136{
1137 __pthread_testcancel(1);
1138 return(waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval));
1139}
1140
1141int
1142waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap, __unused register_t *retval)
91447636
A
1143{
1144 user_siginfo_t collect64; /* siginfo data to return to caller */
1145
2d21ac55
A
1146 int nfound;
1147 proc_t p;
91447636
A
1148 int error;
1149
2d21ac55
A
1150 /*
1151 * Forced validation of options for T.waitpid 21; should be a TSD!
1152 * This will pass the test, but note that we have more bits than the
1153 * standard specifies that we will allow in, in this case. The test
1154 * passes because they light all the bits, not just the ones we allow,
1155 * and so the following check returns EINVAL like the test wants.
1156 */
1157 if (((uap->options & (WNOHANG|WNOWAIT|WCONTINUED|WUNTRACED|WSTOPPED|WEXITED)) != uap->options) ||
1158 (uap->options == 0))
1159 return (EINVAL); /* bits set that aren't recognized */
1160
1161 /*
1162 * Overly critical options checking, per POSIX
1163 */
1164 switch(uap->idtype) {
1165 case P_PID: /* child with process ID equal to... */
1166 case P_PGID: /* child with process group ID equal to... */
1167 if (((int)uap->id) < 0)
1168 return (EINVAL);
1169 break;
1170 case P_ALL: /* any child */
1171 break;
1172 }
1173
91447636 1174loop:
2d21ac55
A
1175 proc_list_lock();
1176loop1:
91447636
A
1177 nfound = 0;
1178 for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
1179 switch(uap->idtype) {
1180 case P_PID: /* child with process ID equal to... */
1181 if (p->p_pid != (pid_t)uap->id)
1182 continue;
1183 break;
1184 case P_PGID: /* child with process group ID equal to... */
2d21ac55 1185 if (p->p_pgrpid != (pid_t)uap->id)
91447636
A
1186 continue;
1187 break;
1188 case P_ALL: /* any child */
1189 break;
1190 }
1191
1192 /* XXX This is racy because we don't get the lock!!!! */
1193
1194 /*
1195 * Wait collision; go to sleep and restart; used to maintain
1196 * the single return for waited process guarantee.
1197 */
2d21ac55
A
1198 if (p->p_listflag & P_LIST_WAITING) {
1199 (void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitidcoll", 0);
1200 goto loop1;
91447636 1201 }
2d21ac55 1202 p->p_listflag |= P_LIST_WAITING; /* mark busy */
91447636
A
1203
1204 nfound++;
1205
1206 /*
1207 * Types of processes we are interested in
1208 *
1209 * XXX Don't know what to do for WCONTINUED?!?
1210 */
1211 switch(p->p_stat) {
1212 case SZOMB: /* Exited */
1213 if (!(uap->options & WEXITED))
1214 break;
1215
2d21ac55
A
1216 /* drop the lock and the thread is going to return */
1217 proc_list_unlock();
1218
91447636 1219 /* Collect "siginfo" information for caller */
2d21ac55 1220 collect64.si_signo = SIGCHLD;
91447636
A
1221 collect64.si_code = 0;
1222 collect64.si_errno = 0;
1223 collect64.si_pid = 0;
1224 collect64.si_uid = 0;
1225 collect64.si_addr = 0;
2d21ac55 1226 collect64.si_status = WEXITSTATUS(p->p_xstat);
91447636
A
1227 collect64.si_band = 0;
1228
1229 if (IS_64BIT_PROCESS(p)) {
1230 error = copyout((caddr_t)&collect64,
1231 uap->infop,
1232 sizeof(collect64));
1233 } else {
1234 siginfo_t collect;
1235 siginfo_64to32(&collect64,&collect);
1236 error = copyout((caddr_t)&collect,
1237 uap->infop,
1238 sizeof(collect));
1239 }
1240 /* information unavailable? */
2d21ac55
A
1241 if (error)
1242 goto out;
91447636
A
1243
1244 /* Prevent other processes from waiting for this event? */
1245 if (!(uap->options & WNOWAIT)) {
1246 /* Clean up */
2d21ac55
A
1247 if (!reap_child_locked(q, p, 0, 0, 0)) {
1248 proc_list_lock();
1249 p->p_listflag &= ~P_LIST_WAITING;
b36670ce 1250 wakeup(&p->p_stat);
2d21ac55 1251 proc_list_unlock();
b36670ce 1252 }
2d21ac55
A
1253 } else {
1254 proc_list_lock();
1255 p->p_listflag &= ~P_LIST_WAITING;
1256 proc_list_unlock();
91447636
A
1257 }
1258
1259 return (0);
1260
1261 case SSTOP: /* Stopped */
1262 /*
1263 * If we are not interested in stopped processes, then
1264 * ignore this one.
1265 */
1266 if (!(uap->options & WSTOPPED))
1267 break;
1268
1269 /*
1270 * If someone has already waited it, we lost a race
1271 * to be the one to return status.
1272 */
2d21ac55 1273 if ((p->p_lflag & P_LWAITED) != 0)
91447636
A
1274 break;
1275
2d21ac55
A
1276 /* drop the lock and the thread is going to return */
1277 proc_list_unlock();
91447636
A
1278
1279 /* Collect "siginfo" information for caller */
2d21ac55 1280 collect64.si_signo = SIGCHLD;
91447636
A
1281 collect64.si_code = 0;
1282 collect64.si_errno = 0;
1283 collect64.si_pid = 0;
1284 collect64.si_uid = 0;
1285 collect64.si_addr = 0;
2d21ac55 1286 proc_lock(p);
91447636 1287 collect64.si_status = p->p_xstat;
2d21ac55 1288 proc_unlock(p);
91447636
A
1289 collect64.si_band = 0;
1290
1291 if (IS_64BIT_PROCESS(p)) {
1292 error = copyout((caddr_t)&collect64,
1293 uap->infop,
1294 sizeof(collect64));
1295 } else {
1296 siginfo_t collect;
1297 siginfo_64to32(&collect64,&collect);
1298 error = copyout((caddr_t)&collect,
1299 uap->infop,
1300 sizeof(collect));
1301 }
1302 /* information unavailable? */
2d21ac55
A
1303 if (error)
1304 goto out;
91447636
A
1305
1306 /* Prevent other process for waiting for this event? */
1307 if (!(uap->options & WNOWAIT)) {
2d21ac55
A
1308 proc_lock(p);
1309 p->p_lflag |= P_LWAITED;
1310 proc_unlock(p);
91447636
A
1311 }
1312
2d21ac55
A
1313 error = 0;
1314 goto out;
91447636
A
1315
1316 default: /* All others */
1317 /* ...meaning Continued */
1318 if (!(uap->options & WCONTINUED))
1319 break;
1320
1321 /*
1322 * If the flag isn't set, then this process has not
1323 * been stopped and continued, or the status has
1324 * already been reaped by another caller of waitid().
1325 */
1326 if ((p->p_flag & P_CONTINUED) == 0)
1327 break;
1328
2d21ac55
A
1329 /* drop the lock and the thread is going to return */
1330 proc_list_unlock();
1331
91447636 1332 /* Collect "siginfo" information for caller */
2d21ac55
A
1333 proc_lock(p);
1334 collect64.si_signo = SIGCHLD;
1335 collect64.si_code = CLD_CONTINUED;
91447636 1336 collect64.si_errno = 0;
2d21ac55 1337 collect64.si_pid = p->p_contproc;
91447636
A
1338 collect64.si_uid = 0;
1339 collect64.si_addr = 0;
1340 collect64.si_status = p->p_xstat;
1341 collect64.si_band = 0;
2d21ac55 1342 proc_unlock(p);
91447636
A
1343
1344 if (IS_64BIT_PROCESS(p)) {
1345 error = copyout((caddr_t)&collect64,
1346 uap->infop,
1347 sizeof(collect64));
1348 } else {
1349 siginfo_t collect;
1350 siginfo_64to32(&collect64,&collect);
1351 error = copyout((caddr_t)&collect,
1352 uap->infop,
1353 sizeof(collect));
1354 }
1355 /* information unavailable? */
2d21ac55
A
1356 if (error)
1357 goto out;
91447636
A
1358
1359 /* Prevent other processes from waiting for this event? */
1360 if (!(uap->options & WNOWAIT)) {
2d21ac55 1361 OSBitAndAtomic(~((uint32_t)P_CONTINUED), (UInt32 *)&p->p_flag);
91447636
A
1362 }
1363
2d21ac55
A
1364 error = 0;
1365 goto out;
91447636 1366 }
2d21ac55 1367 /* LIST LOCK IS HELD HERE */
91447636 1368 /* Not a process we are interested in; go on to next child */
2d21ac55
A
1369
1370 p->p_listflag &= ~P_LIST_WAITING;
91447636
A
1371 wakeup(&p->p_stat);
1372 }
1373
2d21ac55 1374 /* list lock is always held */
91447636 1375 /* No child processes that could possibly satisfy the request? */
2d21ac55
A
1376 if (nfound == 0) {
1377 proc_list_unlock();
91447636 1378 return (ECHILD);
2d21ac55 1379 }
91447636
A
1380
1381 if (uap->options & WNOHANG) {
2d21ac55 1382 proc_list_unlock();
91447636
A
1383 return (0);
1384 }
1385
2d21ac55 1386 if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)))
1c79356b 1387 return (error);
7b1edb79 1388
1c79356b 1389 goto loop;
2d21ac55
A
1390out:
1391 proc_list_lock();
1392 p->p_listflag &= ~P_LIST_WAITING;
1393 wakeup(&p->p_stat);
1394 proc_list_unlock();
1395 return (error);
1c79356b
A
1396}
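/*
 * Illustrative sketch (not part of the original file): a user-space call
 * that exercises the idtype/option checks in waitid_nocancel() above.
 * Assumes the standard <sys/wait.h> waitid() wrapper; error handling omitted.
 */
#if 0	/* example only, user-space code */
#include <sys/wait.h>
#include <signal.h>

static void
poll_child(pid_t pid)
{
	siginfo_t info;

	/* WNOWAIT leaves the child reapable by a later wait4()/waitid() */
	if (waitid(P_PID, (id_t)pid, &info,
	    WEXITED | WSTOPPED | WNOHANG | WNOWAIT) == 0) {
		(void)info.si_code;	/* e.g. CLD_CONTINUED for the continued case */
		(void)info.si_status;	/* filled from p_xstat by the code above */
	}
}
#endif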
1397
1398/*
1399 * make process 'parent' the new parent of process 'child'.
1400 */
1401void
2d21ac55 1402proc_reparentlocked(proc_t child, proc_t parent, int cansignal, int locked)
1c79356b 1403{
2d21ac55 1404 proc_t oldparent = PROC_NULL;
1c79356b
A
1405
1406 if (child->p_pptr == parent)
1407 return;
1408
2d21ac55
A
1409 if (locked == 0)
1410 proc_list_lock();
1411
1412 oldparent = child->p_pptr;
1413#if __PROC_INTERNAL_DEBUG
1414 if (oldparent == PROC_NULL)
1415 panic("proc_reparent: process %x does not have a parent\n", (unsigned int)child);
1416#endif
1417
1c79356b 1418 LIST_REMOVE(child, p_sibling);
2d21ac55
A
1419#if __PROC_INTERNAL_DEBUG
1420 if (oldparent->p_childrencnt == 0)
1421 panic("process children count already 0\n");
1422#endif
1423 oldparent->p_childrencnt--;
1424#if __PROC_INTERNAL_DEBUG1
1425 if (oldparent->p_childrencnt < 0)
1426 panic("process children count -ve\n");
1427#endif
1c79356b 1428 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
2d21ac55 1429 parent->p_childrencnt++;
1c79356b 1430 child->p_pptr = parent;
2d21ac55
A
1431 child->p_ppid = parent->p_pid;
1432
1433 proc_list_unlock();
91447636 1434
2d21ac55 1435 if ((cansignal != 0) && (initproc == parent) && (child->p_stat == SZOMB))
91447636 1436 psignal(initproc, SIGCHLD);
2d21ac55
A
1437 if (locked == 1)
1438 proc_list_lock();
1c79356b
A
1439}
1440
1c79356b
A
1441/*
1442 * Make the current process an "init" process, meaning
1443 * that it doesn't have a parent, and that it won't be
1444 * gunned down by kill(-1, 0).
1445 */
7b1edb79 1446kern_return_t
91447636 1447init_process(__unused struct init_process_args *args)
1c79356b 1448{
2d21ac55 1449 proc_t p = current_proc();
1c79356b 1450
e5568f75 1451 AUDIT_MACH_SYSCALL_ENTER(AUE_INITPROCESS);
91447636 1452 if (suser(kauth_cred_get(), &p->p_acflag)) {
e5568f75 1453 AUDIT_MACH_SYSCALL_EXIT(KERN_NO_ACCESS);
1c79356b 1454 return(KERN_NO_ACCESS);
e5568f75 1455 }
1c79356b 1456
2d21ac55 1457 if (p->p_pid != 1 && p->p_pgrpid != p->p_pid)
1c79356b 1458 enterpgrp(p, p->p_pid, 0);
2d21ac55 1459 OSBitOrAtomic(P_SYSTEM, (UInt32 *)&p->p_flag);
1c79356b
A
1460
1461 /*
1462 * Take us out of the sibling chain, and
1463 * out of our parent's child chain.
1464 */
2d21ac55 1465 proc_list_lock();
1c79356b
A
1466 LIST_REMOVE(p, p_sibling);
1467 p->p_sibling.le_prev = NULL;
1468 p->p_sibling.le_next = NULL;
1469 p->p_pptr = kernproc;
2d21ac55
A
1470 p->p_ppid = 0;
1471 proc_list_unlock();
1472
1c79356b 1473
e5568f75 1474 AUDIT_MACH_SYSCALL_EXIT(KERN_SUCCESS);
1c79356b
A
1475 return(KERN_SUCCESS);
1476}
1477
7b1edb79 1478
0b4e3aa0
A
1479/*
1480 * Exit: deallocate address space and other resources, change proc state
1481 * to zombie, and unlink proc from allproc and parent's lists. Save exit
1482 * status and rusage for wait(). Check for child processes and orphan them.
1483 */
1484
91447636 1485void
2d21ac55
A
1486vfork_exit(proc_t p, int rv)
1487{
1488 vfork_exit_internal(p, rv, 0);
1489}
1490
1491void
1492vfork_exit_internal(proc_t p, int rv, int forceexit)
0b4e3aa0 1493{
91447636
A
1494 thread_t self = current_thread();
1495#ifdef FIXME
0b4e3aa0 1496 struct task *task = p->task;
91447636 1497#endif
0b4e3aa0
A
1498 struct uthread *ut;
1499
91447636
A
1500 /*
1501 * If a thread in this task has already
1502 * called exit(), then halt any others
1503 * right here.
1504 */
1505
1506 ut = get_bsdthread_info(self);
91447636 1507
55e303ae 1508
2d21ac55
A
1509 proc_lock(p);
1510 if ((p->p_lflag & P_LPEXIT) == P_LPEXIT) {
1511 /*
1512 * This happens when a parent exits or is killed while a vfork is in
1513 * progress in other threads; shutdown code, for example, has already called exit1()
1514 */
1515 proc_unlock(p);
1516 return;
1517 }
1518 p->p_lflag |= (P_LEXIT | P_LPEXIT);
1519 proc_unlock(p);
1520
1521 if (forceexit == 0) {
1522 /*
1523 * The parent of a vfork child has already called exit() and the
1524 * thread that has the vfork in progress terminates. So there is no
1525 * separate address space here and it has already been marked for
1526 * termination. This was never covered before and could cause problems
1527 * if we block here for outside code.
1528 */
1529 /* Notify the perf server */
1530 (void)sys_perf_notify(self, p->p_pid);
1531 }
55e303ae 1532
0b4e3aa0
A
1533 /*
1534 * Remove proc from allproc queue and from pidhash chain.
1535 * Need to do this before we do anything that can block.
1536 * Not doing so causes things like mount() to find this on allproc
1537 * in a partially cleaned state.
1538 */
2d21ac55
A
1539
1540 proc_list_lock();
1541
0b4e3aa0 1542 LIST_REMOVE(p, p_list);
55e303ae 1543 LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2d21ac55
A
1544 /* will not be visible via proc_find */
1545 p->p_listflag |= P_LIST_EXITED;
0b4e3aa0 1546
2d21ac55 1547 proc_list_unlock();
0b4e3aa0 1548
2d21ac55 1549 proc_lock(p);
0b4e3aa0 1550 p->p_xstat = rv;
2d21ac55
A
1551 p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
1552 p->p_sigignore = ~0;
1553 proc_unlock(p);
1554
1555 proc_spinlock(p);
1556 if (thread_call_cancel(p->p_rcall))
1557 p->p_ractive--;
1558
1559 while (p->p_ractive > 0) {
1560 proc_spinunlock(p);
1561
1562 delay(1);
1563
1564 proc_spinlock(p);
1565 }
1566 proc_spinunlock(p);
1567
1568 thread_call_free(p->p_rcall);
1569 p->p_rcall = NULL;
1570
1571 ut->uu_siglist = 0;
0b4e3aa0 1572
55e303ae 1573 vproc_exit(p);
0b4e3aa0
A
1574}
1575
0b4e3aa0 1576void
2d21ac55 1577vproc_exit(proc_t p)
0b4e3aa0 1578{
2d21ac55
A
1579 proc_t q;
1580 proc_t pp;
1581
1582 vnode_t tvp;
91447636 1583#ifdef FIXME
0b4e3aa0 1584 struct task *task = p->task;
91447636 1585#endif
2d21ac55
A
1586 struct pgrp * pg;
1587 struct session *sessp;
1588 boolean_t fstate;
0b4e3aa0 1589
91447636 1590 /* XXX Zombie allocation may fail, in which case stats get lost */
0b4e3aa0
A
1591 MALLOC_ZONE(p->p_ru, struct rusage *,
1592 sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
1593
2d21ac55
A
1594
1595 proc_refdrain(p);
1596
0b4e3aa0
A
1597 /*
1598 * Close open files and release open-file table.
1599 * This may block!
1600 */
1601 fdfree(p);
1602
2d21ac55
A
1603 sessp = proc_session(p);
1604 if (SESS_LEADER(p, sessp)) {
1605
1606 /* Protected by funnel for tty accesses */
1607 fstate = thread_funnel_set(kernel_flock, TRUE);
0b4e3aa0 1608
2d21ac55 1609 if (sessp->s_ttyvp != NULLVP) {
fa4905b1 1610 struct vnode *ttyvp;
2d21ac55 1611 int ttyvid;
91447636 1612 struct vfs_context context;
2d21ac55 1613 struct tty * tp;
fa4905b1 1614
0b4e3aa0
A
1615 /*
1616 * Controlling process.
1617 * Signal foreground pgrp,
1618 * drain controlling terminal
1619 * and revoke access to controlling terminal.
1620 */
2d21ac55
A
1621 tp = sessp->s_ttyp;
1622
1623 if ((tp != TTY_NULL) && (tp->t_session == sessp)) {
1624 tty_pgsignal(tp, SIGHUP, 1);
1625 (void) ttywait(tp);
0b4e3aa0
A
1626 /*
1627 * The tty could have been revoked
1628 * if we blocked.
1629 */
2d21ac55
A
1630
1631 session_lock(sessp);
1632 ttyvp = sessp->s_ttyvp;
1633 ttyvid = sessp->s_ttyvid;
1634 sessp->s_ttyvp = NULL;
1635 sessp->s_ttyvid = 0;
1636 sessp->s_ttyp = NULL;
1637 sessp->s_ttypgrpid = NO_PID;
1638 session_unlock(sessp);
1639
1640 if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
1641 context.vc_thread = proc_thread(p); /* XXX */
1642 context.vc_ucred = kauth_cred_proc_ref(p);
1643 VNOP_REVOKE(ttyvp, REVOKEALL, &context);
1644 vnode_put(ttyvp);
1645 kauth_cred_unref(&context.vc_ucred);
1646 }
1647 } else {
1648 session_lock(sessp);
1649 ttyvp = sessp->s_ttyvp;
1650 sessp->s_ttyvp = NULL;
1651 sessp->s_ttyvid = 0;
1652 sessp->s_ttyp = NULL;
1653 sessp->s_ttypgrpid = NO_PID;
1654 session_unlock(sessp);
0b4e3aa0 1655 }
2d21ac55 1656 if (ttyvp)
91447636 1657 vnode_rele(ttyvp);
0b4e3aa0
A
1658 /*
1659 * s_ttyp is not zero'd; we use this to indicate
1660 * that the session once had a controlling terminal.
1661 * (for logging and informational purposes)
1662 */
1663 }
2d21ac55
A
1664 (void) thread_funnel_set(kernel_flock, fstate);
1665
1666 session_lock(sessp);
1667 sessp->s_leader = NULL;
1668 session_unlock(sessp);
0b4e3aa0 1669 }
2d21ac55 1670 session_rele(sessp);
0b4e3aa0 1671
2d21ac55
A
1672 pg = proc_pgrp(p);
1673 fixjobc(p, pg, 0);
1674 pg_rele(pg);
9bccf70c 1675
2d21ac55 1676 p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
0b4e3aa0 1677
2d21ac55
A
1678 proc_list_lock();
1679 proc_childdrainstart(p);
1680 while ((q = p->p_children.lh_first) != NULL) {
1681 q->p_listflag |= P_LIST_DEADPARENT;
1682 if (q->p_stat == SZOMB) {
1683 if (p != q->p_pptr)
1684 panic("parent child linkage broken");
1685 /* check for lookups by zomb sysctl */
1686 while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
1687 msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1688 }
1689 q->p_listflag |= P_LIST_WAITING;
1690 /*
1691 * This is a named reference and it is not granted
1692 * if the reap is already in progress. So we get
1693 * the reference here exclusively and there can be
1694 * no waiters. So there is no need for a wakeup
1695 * after we are done. Also the reap frees the structure
1696 * and the proc struct cannot be used for wakeups either.
1697 * It is safe to use q here as this is a system reap.
1698 */
1699 (void)reap_child_locked(p, q, 1, 1, 0);
1700 } else {
1701 proc_reparentlocked(q, initproc, 0, 1);
1702 /*
1703 * Traced processes are killed
1704 * since their existence means someone is messing up.
1705 */
1706 if (q->p_lflag & P_LTRACED) {
1707 proc_list_unlock();
1708 proc_lock(q);
1709 q->p_lflag &= ~P_LTRACED;
1710 if (q->sigwait_thread) {
1711 proc_unlock(q);
1712 /*
1713 * The sigwait_thread could be stopped at a
1714 * breakpoint. Wake it up to kill.
1715 * Need to do this as it could be a thread which is not
1716 * the first thread in the task. So any attempts to kill
1717 * the process would result in a deadlock on q->sigwait.
1718 */
1719 thread_resume((thread_t)q->sigwait_thread);
1720 clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
1721 threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
1722 } else
1723 proc_unlock(q);
1724
1725 psignal(q, SIGKILL);
1726 proc_list_lock();
0b4e3aa0 1727 }
0b4e3aa0
A
1728 }
1729 }
1730
2d21ac55
A
1731 proc_childdrainend(p);
1732 proc_list_unlock();
1733
1734 /*
1735 * Release reference to text vnode
1736 */
1737 tvp = p->p_textvp;
1738 p->p_textvp = NULL;
1739 if (tvp != NULLVP) {
1740 vnode_rele(tvp);
1741 }
1742
0b4e3aa0
A
1743 /*
1744 * Save exit status and final rusage info, adding in child rusage
91447636
A
1745 * info and self times. If we were unable to allocate a zombie
1746 * structure, this information is lost.
0b4e3aa0 1747 */
2d21ac55 1748 /* No need for locking here as no one other than this thread can access this */
91447636
A
1749 if (p->p_ru != NULL) {
1750 *p->p_ru = p->p_stats->p_ru;
1751 timerclear(&p->p_ru->ru_utime);
1752 timerclear(&p->p_ru->ru_stime);
0b4e3aa0
A
1753
1754#ifdef FIXME
91447636 1755 if (task) {
0b4e3aa0
A
1756 task_basic_info_data_t tinfo;
1757 task_thread_times_info_data_t ttimesinfo;
1758 int task_info_stuff, task_ttimes_stuff;
1759 struct timeval ut,st;
1760
1761 task_info_stuff = TASK_BASIC_INFO_COUNT;
1762 task_info(task, TASK_BASIC_INFO,
1763 &tinfo, &task_info_stuff);
1764 p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
1765 p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
1766 p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
1767 p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
1768
1769 task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
1770 task_info(task, TASK_THREAD_TIMES_INFO,
1771 &ttimesinfo, &task_ttimes_stuff);
1772
1773 ut.tv_sec = ttimesinfo.user_time.seconds;
1774 ut.tv_usec = ttimesinfo.user_time.microseconds;
1775 st.tv_sec = ttimesinfo.system_time.seconds;
1776 st.tv_usec = ttimesinfo.system_time.microseconds;
1777 timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
91447636
A
1778 timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
1779 }
0b4e3aa0
A
1780#endif /* FIXME */
1781
91447636
A
1782 ruadd(p->p_ru, &p->p_stats->p_cru);
1783 }
0b4e3aa0
A
1784
1785 /*
1786 * Free up profiling buffers.
1787 */
1788 {
1789 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
1790
1791 p1 = p0->pr_next;
1792 p0->pr_next = NULL;
1793 p0->pr_scale = 0;
1794
1795 for (; p1 != NULL; p1 = pn) {
1796 pn = p1->pr_next;
91447636 1797 kfree(p1, sizeof *p1);
0b4e3aa0
A
1798 }
1799 }
1800
1801 /*
1802 * Other substructures are freed from wait().
1803 */
2d21ac55 1804 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
0b4e3aa0
A
1805 p->p_stats = NULL;
1806
2d21ac55 1807 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
0b4e3aa0
A
1808 p->p_sigacts = NULL;
1809
2d21ac55 1810 proc_limitdrop(p, 1);
0b4e3aa0
A
1811 p->p_limit = NULL;
1812
1813 /*
1814 * Finish up by terminating the task
1815 * and halt this thread (only if a
1816 * member of the task exiting).
1817 */
1818 p->task = TASK_NULL;
1819
1820 /*
1821 * Notify parent that we're gone.
1822 */
2d21ac55
A
1823 pp = proc_parent(p);
1824 if ((p->p_listflag & P_LIST_DEADPARENT) == 0) {
1825 if (pp != initproc) {
1826 proc_lock(pp);
1827 pp->si_pid = p->p_pid;
1828 pp->si_status = p->p_xstat;
1829 pp->si_code = CLD_EXITED;
1830 /*
1831 * p_ucred usage is safe as it is an exiting process
1832 * and reference is dropped in reap
1833 */
1834 pp->si_uid = p->p_ucred->cr_ruid;
1835 proc_unlock(pp);
1836 }
1837 /* mark as a zombie */
1839 /* No need to take proc lock as all refs are drained and
1840 * no one except parent (reaping ) can look at this.
1841 * The write is to an int and is coherent. Also parent is
1842 * keyed off of list lock for reaping
1843 */
1844 p->p_stat = SZOMB;
0b4e3aa0 1845
2d21ac55 1846 psignal(pp, SIGCHLD);
91447636 1847
2d21ac55
A
1848 /* and now wakeup the parent */
1849 proc_list_lock();
1850 wakeup((caddr_t)pp);
1851 proc_list_unlock();
1852 } else {
1853 proc_list_lock();
1854 p->p_stat = SZOMB;
1855 /* check for lookups by zomb sysctl */
1856 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
1857 msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1858 }
1859 p->p_listflag |= P_LIST_WAITING;
1860 /*
1861 * This is a named reference and it is not granted
1862 * if the reap is already in progress. So we get
1863 * the reference here exclusively and there can be
1864 * no waiters. So there is no need for a wakeup
1865 * after we are done. Also the reap frees the structure
1866 * and the proc struct cannot be used for wakeups either.
1867 * It is safe to use p here as this is a system reap.
1868 */
1869 (void)reap_child_locked(pp, p, 0, 1, 1);
1870 /* list lock dropped by reap_child_locked */
1871 }
1872 proc_rele(pp);
0b4e3aa0 1873}
91447636
A
1874
1875
1876/*
1877 * munge_rusage
1878 * LP64 support - long is 64 bits if we are dealing with a 64 bit user
1879 * process. We munge the kernel (32 bit) version of rusage into the
1880 * 64 bit version.
1881 */
1882__private_extern__ void
1883munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p)
1884{
1885 /* timeval changes size, so utime and stime need special handling */
1886 a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
1887 a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
1888 a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
1889 a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
1890 /*
1891 * everything else can be a direct assign, since there is no loss
1892 * of precision implied going 32->64.
1893 */
1894 a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
1895 a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
1896 a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
1897 a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
1898 a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
1899 a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
1900 a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
1901 a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
1902 a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
1903 a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
1904 a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
1905 a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
1906 a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
1907 a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
1908}