/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>	/* fdfree */
#include <sys/shm_internal.h>	/* shmexit */
#include <sys/acct.h>		/* acct_process */
#include <machine/spl.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#if KTRACE
#include <sys/ktrace.h>
#endif

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/mach_traps.h>	/* init_process */

extern char init_task_failure_data[];
int exit1(struct proc *, int, int *);
void proc_prepareexit(struct proc *p);
void vfork_exit(struct proc *p, int rv);
void vproc_exit(struct proc *p);
__private_extern__ void munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p);

/*
 * Things which should have prototypes in headers, but don't
 */
void	unix_syscall_return(int);
void	*get_bsduthreadarg(thread_t);
void	proc_exit(struct proc *p);
int	wait1continue(int result);
int	waitidcontinue(int result);
int	*get_bsduthreadrval(thread_t);
kern_return_t sys_perf_notify(struct task *task, exception_data_t code,
		mach_msg_type_number_t codeCnt);

/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX Should share code with bsd/dev/ppc/unix_signal.c
 */
static void
siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = CAST_DOWN(void *, in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = CAST_DOWN(void *, in->si_value.sival_ptr);
	out->si_band = in->si_band;		/* range reduction */
	out->pad[0] = in->pad[0];		/* mcontext.ss.r1 */
}

/*
 * exit --
 *	Death of process.
 */
void
exit(struct proc *p, struct exit_args *uap, int *retval)
{
	exit1(p, W_EXITCODE(uap->rval, 0), retval);

	/* drop funnel before we return */
	thread_funnel_set(kernel_flock, FALSE);
	thread_exception_return();
	/* NOTREACHED */
	while (TRUE)
		thread_block(THREAD_CONTINUE_NULL);
	/* NOTREACHED */
}
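
/*
 * Illustration, assuming the usual <sys/wait.h> encoding (this note is an
 * added sketch, not original kernel commentary): W_EXITCODE() builds the
 * wait(2)-style status word that exit1() stores in p_xstat, roughly
 *
 *	W_EXITCODE(ret, sig) == ((ret) << 8) | (sig)
 *
 * so a plain exit with code 2 yields 0x0200, from which WEXITSTATUS()
 * recovers 2 and WTERMSIG() recovers 0.  See <sys/wait.h> for the
 * authoritative definitions.
 */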

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
int
exit1(struct proc *p, int rv, int *retval)
{
	thread_t self = current_thread();
	struct task *task = p->task;
	register int s;
	struct uthread *ut;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);
	if (ut->uu_flag & UT_VFORK) {
		vfork_exit(p, rv);
		vfork_return(self, p->p_pptr, p, retval);
		unix_syscall_return(0);
		/* NOT REACHED */
	}
	AUDIT_SYSCALL_EXIT(0, p, ut);	/* Exit is always successful */
	signal_lock(p);
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(self) != task) {
				signal_unlock(p);
				return(0);
			}
			signal_unlock(p);
			thread_terminate(self);
			thread_funnel_set(kernel_flock, FALSE);
			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	signal_unlock(p);
	if (p->p_pid == 1) {
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s",
		    init_task_failure_data);
	}

	s = splsched();
	p->p_flag |= P_WEXIT;
	splx(s);
	proc_prepareexit(p);
	p->p_xstat = rv;

	/* task terminate will call proc_terminate and that cleans it up */
	task_terminate_internal(task);

	return(0);
}

void
proc_prepareexit(struct proc *p)
{
	struct uthread *ut;
	exception_data_t code[EXCEPTION_CODE_MAX];
	thread_t self = current_thread();

	code[0] = (exception_data_t)0xFF000001;		/* Set terminate code */
	code[1] = (exception_data_t)p->p_pid;		/* Pass out the pid */
	/* Notify the perf server */
	(void)sys_perf_notify(p->task, (exception_data_t)&code, 2);

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this process
	 * on allproc in a partially cleaned state.
	 */
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	LIST_REMOVE(p, p_hash);

#ifdef PGINPROF
	vmsizmon();
#endif
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_sigignore = ~(sigcantmask);
	p->p_siglist = 0;
	ut = get_bsdthread_info(self);
	ut->uu_siglist = 0;
	untimeout(realitexpire, (caddr_t)p->p_pid);
}

void
proc_exit(struct proc *p)
{
	register struct proc *q, *nq, *pp;
	struct task *task = p->task;
	register int s;
	boolean_t funnel_state;

	/*
	 * This can happen if proc_exit() is reached via thread_terminate()
	 * of the last thread of the process, without going through exit1().
	 */

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	if (!(p->p_flag & P_WEXIT)) {
		s = splsched();
		p->p_flag |= P_WEXIT;
		splx(s);
		proc_prepareexit(p);
	}

	p->p_lflag |= P_LPEXIT;
	/* XXX Zombie allocation may fail, in which case stats get lost */
	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	/*
	 * need to cancel async IO requests that can be cancelled and wait for those
	 * already active.  MAY BLOCK!
	 */

	p->p_lflag |= P_LREFDRAIN;
	while (p->p_internalref) {
		p->p_lflag |= P_LREFDRAINWAIT;
		msleep(&p->p_internalref, (lck_mtx_t *)0, 0, "proc_refdrain", 0);
	}
	p->p_lflag &= ~P_LREFDRAIN;
	p->p_lflag |= P_LREFDEAD;

	_aio_exit( p );

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	/* Close ref SYSV Shared memory */
	if (p->vm_shm)
		shmexit(p);
	/* Release SYSV semaphores */
	semexit(p);

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			struct vnode *ttyvp;
			struct vfs_context context;

			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				context.vc_proc = p;
				context.vc_ucred = p->p_ucred;
				if (sp->s_ttyvp)
					VNOP_REVOKE(sp->s_ttyvp, REVOKEALL, &context);
			}
			ttyvp = sp->s_ttyvp;
			sp->s_ttyvp = NULL;
			if (ttyvp) {
				vnode_rele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	(void)acct_process(p);

#if KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vnode_put() */
	if (p->p_tracep) {
		struct vnode *tvp = p->p_tracep;
		p->p_tracep = NULL;
		vnode_rele(tvp);
	}
#endif

	while (q = p->p_children.lh_first) {
		proc_reparent(q, initproc);
		/*
		 * Traced processes are killed
		 * since their existence means someone is messing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {
				/*
				 * The sigwait_thread could be stopped at a
				 * breakpoint. Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task. So any attempts to kill
				 * the process would result in a deadlock on q->sigwait.
				 */
				thread_resume((thread_t)q->sigwait_thread);
				clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
				threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
			}
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.  If we were unable to allocate a zombie
	 * structure, this information is lost.
	 */
	if (p->p_ru != NULL) {
		*p->p_ru = p->p_stats->p_ru;

		timerclear(&p->p_ru->ru_utime);
		timerclear(&p->p_ru->ru_stime);

		if (task) {
			task_basic_info_data_t tinfo;
			task_thread_times_info_data_t ttimesinfo;
			int task_info_stuff, task_ttimes_stuff;
			struct timeval ut,st;

			task_info_stuff	= TASK_BASIC_INFO_COUNT;
			task_info(task, TASK_BASIC_INFO,
				  (task_info_t)&tinfo, &task_info_stuff);
			p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
			p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
			p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
			p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

			task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
			task_info(task, TASK_THREAD_TIMES_INFO,
				  (task_info_t)&ttimesinfo, &task_ttimes_stuff);

			ut.tv_sec = ttimesinfo.user_time.seconds;
			ut.tv_usec = ttimesinfo.user_time.microseconds;
			st.tv_sec = ttimesinfo.system_time.seconds;
			st.tv_usec = ttimesinfo.system_time.microseconds;
			timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
			timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
		}

		ruadd(p->p_ru, &p->p_stats->p_cru);
	}

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree(p1, sizeof *p1);
		}
	}

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
	p->p_sigacts = NULL;

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
	p->p_limit = NULL;

	/*
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).
	 */
	p->task = TASK_NULL;
	//task->proc = NULL;
	set_bsdtask_info(task, NULL);

	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.
	 */
	if (p->p_pptr->p_flag & P_NOCLDWAIT) {
		struct proc *opp = p->p_pptr;

		/*
		 * Add child resource usage to parent before giving
		 * zombie to init.  If we were unable to allocate a
		 * zombie structure, this information is lost.
		 */
		if (p->p_ru != NULL)
			ruadd(&p->p_pptr->p_stats->p_cru, p->p_ru);

		proc_reparent(p, initproc);
		/* If there are no more children wakeup parent */
		if (LIST_EMPTY(&opp->p_children))
			wakeup((caddr_t)opp);
	}
	/* should be fine as parent proc would be initproc */
	pp = p->p_pptr;
	if (pp != initproc) {
		pp->si_pid = p->p_pid;
		pp->si_status = p->p_xstat;
		pp->si_code = CLD_EXITED;
		pp->si_uid = p->p_ucred->cr_ruid;
	}
	/* mark as a zombie */
	p->p_stat = SZOMB;

	psignal(pp, SIGCHLD);

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);

	(void) thread_funnel_set(kernel_flock, funnel_state);
}

/*
 * reap_child_process
 *
 * Description:	Given a process from which all status information needed
 *		has already been extracted, if the process is a ptrace
 *		attach process, detach it and give it back to its real
 *		parent, else recover all resources remaining associated
 *		with it.
 *
 * Parameters:	struct proc *parent	Parent of process being reaped
 *		struct proc *child	Process to reap
 *
 * Returns:	0			Process was not reaped because it
 *					came from an attach
 *		1			Process was reaped
 */
static int
reap_child_process(struct proc *parent, struct proc *child)
{
	struct proc *trace_parent;	/* Traced parent process, if tracing */
	struct vnode *tvp;		/* Traced vnode pointer, if used */

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	if (child->p_oppid && (trace_parent = pfind(child->p_oppid))) {
		child->p_oppid = 0;
		proc_reparent(child, trace_parent);
		if (trace_parent != initproc) {
			trace_parent->si_pid = child->p_pid;
			trace_parent->si_status = child->p_xstat;
			trace_parent->si_code = CLD_CONTINUED;
			trace_parent->si_uid = child->p_ucred->cr_ruid;
		}
		psignal(trace_parent, SIGCHLD);
		wakeup((caddr_t)trace_parent);
		return (0);
	}
	child->p_xstat = 0;
	if (child->p_ru) {
		ruadd(&parent->p_stats->p_cru, child->p_ru);
		FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
		child->p_ru = NULL;
	} else {
		printf("Warning : lost p_ru for %s\n", child->p_comm);
	}

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(child->p_ucred->cr_ruid, -1);

	/*
	 * Free up credentials.
	 */
	if (child->p_ucred != NOCRED) {
		kauth_cred_t ucr = child->p_ucred;
		child->p_ucred = NOCRED;
		kauth_cred_rele(ucr);
	}

	/*
	 * Release reference to text vnode
	 */
	tvp = child->p_textvp;
	child->p_textvp = NULL;
	if (tvp) {
		vnode_rele(tvp);
	}
	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(child);
	LIST_REMOVE(child, p_list);	/* off zombproc */
	LIST_REMOVE(child, p_sibling);
	child->p_lflag &= ~P_LWAITING;
	wakeup(&child->p_stat);

	lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
	lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
	FREE_ZONE(child, sizeof *child, M_PROC);
	nprocs--;
	return (1);
}
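
/*
 * Note on the locking contract above (a reading of the code, not original
 * commentary): on the reap path (return 1), reap_child_process() itself
 * clears P_LWAITING and wakes any collided waiters before the proc is freed.
 * On the ptrace-detach path (return 0) the child stays a zombie for its
 * original parent, so the caller is expected to clear P_LWAITING and issue
 * the wakeup itself, as wait4() and waitid() do below.
 */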


int
wait1continue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;
	struct proc *p;

	if (result)
		return(result);

	p = current_proc();
	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return(wait4((struct proc *)p, (struct wait4_args *)vt, retval));
}
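
/*
 * Note on the continuation pattern (a reading of the code, not original
 * commentary): wait4() parks the calling thread with
 * tsleep0(..., wait1continue) instead of blocking and returning through its
 * own stack frame.  When the thread is rescheduled, wait1continue() runs as
 * a Mach continuation on a fresh kernel stack: it refetches the original
 * syscall argument block and return-value slot from the uthread and simply
 * calls wait4() again, which restarts its scan from "loop:".
 * waitidcontinue() below plays the same role for waitid().
 */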

int
wait4(struct proc *q, struct wait4_args *uap, register_t *retval)
{
	register int nfound;
	register struct proc *p;
	int status, error;

	if (uap->pid == 0)
		uap->pid = -q->p_pgid;

loop:
	nfound = 0;
	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid &&
		    p->p_pgid != -(uap->pid))
			continue;
		nfound++;

		/* XXX This is racy because we don't get the lock!!!! */

		if (p->p_lflag & P_LWAITING) {
			(void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
			goto loop;
		}
		p->p_lflag |= P_LWAITING;	/* only allow single thread to wait() */

		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				error = copyout((caddr_t)&status,
						uap->status,
						sizeof(status));
				if (error) {
					p->p_lflag &= ~P_LWAITING;
					wakeup(&p->p_stat);
					return (error);
				}
			}
			if (uap->rusage) {
				if (p->p_ru == NULL) {
					error = ENOMEM;
				} else {
					if (IS_64BIT_PROCESS(q)) {
						struct user_rusage my_rusage;
						munge_rusage(p->p_ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
								uap->rusage,
								sizeof (my_rusage));
					}
					else {
						error = copyout((caddr_t)p->p_ru,
								uap->rusage,
								sizeof (struct rusage));
					}
				}
				/* information unavailable? */
				if (error) {
					p->p_lflag &= ~P_LWAITING;
					wakeup(&p->p_stat);
					return (error);
				}
			}

			/* Clean up */
			if (!reap_child_process(q, p)) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
			}

			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			retval[0] = p->p_pid;
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout((caddr_t)&status,
					uap->status,
					sizeof(status));
			} else
				error = 0;
			p->p_lflag &= ~P_LWAITING;
			wakeup(&p->p_stat);
			return (error);
		}
		p->p_lflag &= ~P_LWAITING;
		wakeup(&p->p_stat);
	}
	if (nfound == 0)
		return (ECHILD);

	if (uap->options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}

	if ((error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue)))
		return (error);

	goto loop;
}
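
/*
 * Usage sketch (illustrative, not original commentary): this handler backs
 * wait4(2)/waitpid(2).  A pid argument of 0 is rewritten above to -pgid
 * ("any child in the caller's process group"), WAIT_ANY (-1) matches any
 * child, and a negative pid selects a specific process group.  With WNOHANG
 * set and no reportable child the call returns 0 with retval[0] == 0 instead
 * of sleeping; WUNTRACED additionally reports children that are stopped.
 */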


int
waitidcontinue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;
	struct proc *p;

	if (result)
		return(result);

	p = current_proc();
	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return(waitid((struct proc *)p, (struct waitid_args *)vt, retval));
}

/*
 * Description:	Suspend the calling thread until one child of the process
 *		containing the calling thread changes state.
 *
 * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
 *		uap->id			pid_t or gid_t or ignored
 *		uap->infop		Address of siginfo_t struct in
 *					user space into which to return status
 *		uap->options		flag values
 *
 * Returns:	0			Success
 *		!0			Error returning status to user space
 */
int
waitid(struct proc *q, struct waitid_args *uap, register_t *retval)
{
	user_siginfo_t collect64;	/* siginfo data to return to caller */

	register int nfound;
	register struct proc *p;
	int error;

loop:
	nfound = 0;
	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		switch(uap->idtype) {
		case P_PID:	/* child with process ID equal to... */
			if (p->p_pid != (pid_t)uap->id)
				continue;
			break;
		case P_PGID:	/* child with process group ID equal to... */
			if (p->p_pgid != (pid_t)uap->id)
				continue;
			break;
		case P_ALL:	/* any child */
			break;
		}

		/* XXX This is racy because we don't get the lock!!!! */

		/*
		 * Wait collision; go to sleep and restart; used to maintain
		 * the single return for waited process guarantee.
		 */
		if (p->p_lflag & P_LWAITING) {
			(void)tsleep(&p->p_stat, PWAIT, "waitidcoll", 0);
			goto loop;
		}
		p->p_lflag |= P_LWAITING;	/* mark busy */

		nfound++;

		/*
		 * Types of processes we are interested in
		 *
		 * XXX Don't know what to do for WCONTINUED?!?
		 */
		switch(p->p_stat) {
		case SZOMB:		/* Exited */
			if (!(uap->options & WEXITED))
				break;

			/* Collect "siginfo" information for caller */
			collect64.si_signo = 0;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr = 0;
			collect64.si_status = p->p_xstat;
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				error = copyout((caddr_t)&collect64,
					uap->infop,
					sizeof(collect64));
			} else {
				siginfo_t collect;
				siginfo_64to32(&collect64, &collect);
				error = copyout((caddr_t)&collect,
					uap->infop,
					sizeof(collect));
			}
			/* information unavailable? */
			if (error) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
				return (error);
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				/* Clean up */
				if (!reap_child_process(q, p)) {
					p->p_lflag &= ~P_LWAITING;
					wakeup(&p->p_stat);
				}
			}

			return (0);

		case SSTOP:		/* Stopped */
			/*
			 * If we are not interested in stopped processes, then
			 * ignore this one.
			 */
			if (!(uap->options & WSTOPPED))
				break;

			/*
			 * If someone has already waited for it, we lost a
			 * race to be the one to return status.
			 */
			if ((p->p_flag & P_WAITED) != 0)
				break;

			/*
			 * If this is not a traced process, and they haven't
			 * indicated an interest in untraced processes, then
			 * ignore this one.
			 */
			if (!(p->p_flag & P_TRACED) && !(uap->options & WUNTRACED))
				break;

			/* Collect "siginfo" information for caller */
			collect64.si_signo = 0;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr = 0;
			collect64.si_status = p->p_xstat;
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				error = copyout((caddr_t)&collect64,
					uap->infop,
					sizeof(collect64));
			} else {
				siginfo_t collect;
				siginfo_64to32(&collect64, &collect);
				error = copyout((caddr_t)&collect,
					uap->infop,
					sizeof(collect));
			}
			/* information unavailable? */
			if (error) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
				return (error);
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				p->p_flag |= P_WAITED;
			}

			p->p_lflag &= ~P_LWAITING;
			wakeup(&p->p_stat);
			return (0);

		default:		/* All others */
					/* ...meaning Continued */
			if (!(uap->options & WCONTINUED))
				break;

			/*
			 * If the flag isn't set, then this process has not
			 * been stopped and continued, or the status has
			 * already been reaped by another caller of waitid().
			 */
			if ((p->p_flag & P_CONTINUED) == 0)
				break;

			/* Collect "siginfo" information for caller */
			collect64.si_signo = 0;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr = 0;
			collect64.si_status = p->p_xstat;
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				error = copyout((caddr_t)&collect64,
					uap->infop,
					sizeof(collect64));
			} else {
				siginfo_t collect;
				siginfo_64to32(&collect64, &collect);
				error = copyout((caddr_t)&collect,
					uap->infop,
					sizeof(collect));
			}
			/* information unavailable? */
			if (error) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
				return (error);
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				p->p_flag &= ~P_CONTINUED;
			}

			p->p_lflag &= ~P_LWAITING;
			wakeup(&p->p_stat);
			return (0);

			break;
		}

		/* Not a process we are interested in; go on to next child */
		p->p_lflag &= ~P_LWAITING;
		wakeup(&p->p_stat);
	}

	/* No child processes that could possibly satisfy the request? */
	if (nfound == 0)
		return (ECHILD);

	if (uap->options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}

	if ((error = tsleep0((caddr_t)q, PWAIT | PCATCH, "waitid", 0, waitidcontinue)))
		return (error);

	goto loop;
}
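
/*
 * Usage sketch (illustrative, not original commentary): from user space this
 * handler backs waitid(2), e.g.
 *
 *	siginfo_t info;
 *	if (waitid(P_PID, pid, &info, WEXITED | WNOWAIT) == 0)
 *		printf("child status %d\n", info.si_status);
 *
 * At least one of WEXITED, WSTOPPED or WCONTINUED must be requested for a
 * child to be reported, and WNOWAIT leaves the child in a waitable state so
 * that a later wait4()/waitid() can still collect it.
 */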

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;

	if (initproc == parent && child->p_stat == SZOMB)
		psignal(initproc, SIGCHLD);
}

/*
 * Make the current process an "init" process, meaning
 * that it doesn't have a parent, and that it won't be
 * gunned down by kill(-1, 0).
 */
kern_return_t
init_process(__unused struct init_process_args *args)
{
	register struct proc *p = current_proc();

	AUDIT_MACH_SYSCALL_ENTER(AUE_INITPROCESS);
	if (suser(kauth_cred_get(), &p->p_acflag)) {
		AUDIT_MACH_SYSCALL_EXIT(KERN_NO_ACCESS);
		return(KERN_NO_ACCESS);
	}

	if (p->p_pid != 1 && p->p_pgid != p->p_pid)
		enterpgrp(p, p->p_pid, 0);
	p->p_flag |= P_SYSTEM;

	/*
	 * Take us out of the sibling chain, and
	 * out of our parent's child chain.
	 */
	LIST_REMOVE(p, p_sibling);
	p->p_sibling.le_prev = NULL;
	p->p_sibling.le_next = NULL;
	p->p_pptr = kernproc;

	AUDIT_MACH_SYSCALL_EXIT(KERN_SUCCESS);
	return(KERN_SUCCESS);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */

void
vfork_exit(struct proc *p, int rv)
{
	thread_t self = current_thread();
#ifdef FIXME
	struct task *task = p->task;
#endif
	register int s;
	struct uthread *ut;
	exception_data_t code[EXCEPTION_CODE_MAX];

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);
#ifdef FIXME
	signal_lock(p);
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(self) != task) {
				signal_unlock(p);
				return;
			}
			signal_unlock(p);
			thread_terminate(self);
			thread_funnel_set(kernel_flock, FALSE);
			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	signal_unlock(p);
	if (p->p_pid == 1) {
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data);
	}
#endif /* FIXME */

	s = splsched();
	p->p_flag |= P_WEXIT;
	p->p_lflag |= P_LPEXIT;
	splx(s);

	code[0] = (exception_data_t)0xFF000001;		/* Set terminate code */
	code[1] = (exception_data_t)p->p_pid;		/* Pass out the pid */
	/* Notify the perf server */
	(void)sys_perf_notify(p->task, (exception_data_t)&code, 2);

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this process
	 * on allproc in a partially cleaned state.
	 */
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	LIST_REMOVE(p, p_hash);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_sigignore = ~0;
	p->p_siglist = 0;

	ut->uu_siglist = 0;
	untimeout(realitexpire, (caddr_t)p->p_pid);

	p->p_xstat = rv;

	vproc_exit(p);
}

void
vproc_exit(struct proc *p)
{
	register struct proc *q, *nq, *pp;
#ifdef FIXME
	struct task *task = p->task;
#endif

	/* XXX Zombie allocation may fail, in which case stats get lost */
	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			struct vnode *ttyvp;
			struct vfs_context context;

			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				context.vc_proc = p;
				context.vc_ucred = p->p_ucred;
				if (sp->s_ttyvp)
					VNOP_REVOKE(sp->s_ttyvp, REVOKEALL, &context);
			}
			ttyvp = sp->s_ttyvp;
			sp->s_ttyvp = NULL;
			if (ttyvp) {
				vnode_rele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

#if KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vnode_rele() */
	if (p->p_tracep) {
		struct vnode *tvp = p->p_tracep;
		p->p_tracep = NULL;
		vnode_rele(tvp);
	}
#endif

	while (q = p->p_children.lh_first) {
		proc_reparent(q, initproc);
		/*
		 * Traced processes are killed
		 * since their existence means someone is messing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {
				/*
				 * The sigwait_thread could be stopped at a
				 * breakpoint. Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task. So any attempts to kill
				 * the process would result in a deadlock on q->sigwait.
				 */
				thread_resume((thread_t)q->sigwait_thread);
				clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
				threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
			}
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.  If we were unable to allocate a zombie
	 * structure, this information is lost.
	 */
	if (p->p_ru != NULL) {
		*p->p_ru = p->p_stats->p_ru;
		timerclear(&p->p_ru->ru_utime);
		timerclear(&p->p_ru->ru_stime);

#ifdef FIXME
		if (task) {
			task_basic_info_data_t tinfo;
			task_thread_times_info_data_t ttimesinfo;
			int task_info_stuff, task_ttimes_stuff;
			struct timeval ut,st;

			task_info_stuff	= TASK_BASIC_INFO_COUNT;
			task_info(task, TASK_BASIC_INFO,
				  &tinfo, &task_info_stuff);
			p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
			p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
			p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
			p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

			task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
			task_info(task, TASK_THREAD_TIMES_INFO,
				  &ttimesinfo, &task_ttimes_stuff);

			ut.tv_sec = ttimesinfo.user_time.seconds;
			ut.tv_usec = ttimesinfo.user_time.microseconds;
			st.tv_sec = ttimesinfo.system_time.seconds;
			st.tv_usec = ttimesinfo.system_time.microseconds;
			timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
			timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
		}
#endif /* FIXME */

		ruadd(p->p_ru, &p->p_stats->p_cru);
	}

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree(p1, sizeof *p1);
		}
	}

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
	p->p_sigacts = NULL;

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
	p->p_limit = NULL;

	/*
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).
	 */
	p->task = TASK_NULL;

	/*
	 * Notify parent that we're gone.
	 */
	pp = p->p_pptr;
	if (pp != initproc) {
		pp->si_pid = p->p_pid;
		pp->si_status = p->p_xstat;
		pp->si_code = CLD_EXITED;
		pp->si_uid = p->p_ucred->cr_ruid;
	}
	/* mark as a zombie */
	p->p_stat = SZOMB;

	psignal(p->p_pptr, SIGCHLD);

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);
}


/*
 * munge_rusage
 *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
 *	process.  We munge the kernel (32 bit) version of rusage into the
 *	64 bit version.
 */
__private_extern__ void
munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p)
{
	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * everything else can be a direct assign, since there is no loss
	 * of precision implied going 32->64.
	 */
	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}