/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>	/* fdfree */
#include <sys/shm_internal.h>	/* shmexit */
#include <sys/acct.h>		/* acct_process */
#include <machine/spl.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#if KTRACE
#include <sys/ktrace.h>
#endif

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/mach_traps.h>	/* init_process */

extern char init_task_failure_data[];
int exit1(struct proc *, int, int *);
void proc_prepareexit(struct proc *p);
void vfork_exit(struct proc *p, int rv);
void vproc_exit(struct proc *p);
__private_extern__ void munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p);

/*
 * Things which should have prototypes in headers, but don't
 */
void	unix_syscall_return(int);
void	*get_bsduthreadarg(thread_t);
void	proc_exit(struct proc *p);
int	wait1continue(int result);
int	waitidcontinue(int result);
int	*get_bsduthreadrval(thread_t);
kern_return_t sys_perf_notify(struct task *task, exception_data_t code,
	mach_msg_type_number_t codeCnt);

/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX Should share code with bsd/dev/ppc/unix_signal.c
 */
static void
siginfo_64to32(user_siginfo_t *in, siginfo_t *out)
{
	out->si_signo = in->si_signo;
	out->si_errno = in->si_errno;
	out->si_code = in->si_code;
	out->si_pid = in->si_pid;
	out->si_uid = in->si_uid;
	out->si_status = in->si_status;
	out->si_addr = CAST_DOWN(void *, in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr = CAST_DOWN(void *, in->si_value.sival_ptr);
	out->si_band = in->si_band;	/* range reduction */
	out->pad[0] = in->pad[0];	/* mcontext.ss.r1 */
}
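
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * CAST_DOWN conversions above simply truncate 64-bit user values to the
 * 32-bit layout, so they are lossless only when the value fits in 32
 * bits. A user-space analogue:
 *
 *	#include <stdint.h>
 *
 *	static void *
 *	cast_down(uint64_t addr)
 *	{
 *		// On an ILP32 process uintptr_t is 32 bits wide, so any
 *		// address above 4GB loses its high bits, just as si_addr
 *		// would above.
 *		return (void *)(uintptr_t)addr;
 *	}
 */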

/*
 * exit --
 *	Death of process.
 */
void
exit(struct proc *p, struct exit_args *uap, int *retval)
{
	exit1(p, W_EXITCODE(uap->rval, 0), retval);

	/* drop funnel before we return */
	thread_funnel_set(kernel_flock, FALSE);
	thread_exception_return();
	/* NOTREACHED */
	while (TRUE)
		thread_block(THREAD_CONTINUE_NULL);
	/* NOTREACHED */
}
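
/*
 * Illustrative user-space sketch (not part of the kernel build):
 * W_EXITCODE(uap->rval, 0) above packs the low byte of the exit value
 * into the status word that wait() later unpacks with WEXITSTATUS().
 *
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int status;
 *
 *		if (fork() == 0)
 *			_exit(42);		// stored as W_EXITCODE(42, 0)
 *		wait(&status);
 *		if (WIFEXITED(status))
 *			printf("exit %d\n", WEXITSTATUS(status));	// 42
 *		return 0;
 *	}
 */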

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
int
exit1(struct proc *p, int rv, int *retval)
{
	thread_t self = current_thread();
	struct task *task = p->task;
	register int s;
	struct uthread *ut;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);
	if (ut->uu_flag & UT_VFORK) {
		vfork_exit(p, rv);
		vfork_return(self, p->p_pptr, p, retval);
		unix_syscall_return(0);
		/* NOT REACHED */
	}
	AUDIT_SYSCALL_EXIT(0, p, ut);	/* Exit is always successful */
	signal_lock(p);
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(self) != task) {
				signal_unlock(p);
				return(0);
			}
			signal_unlock(p);
			thread_terminate(self);
			thread_funnel_set(kernel_flock, FALSE);
			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	signal_unlock(p);
	if (p->p_pid == 1) {
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s",
		    init_task_failure_data);
	}

	s = splsched();
	p->p_flag |= P_WEXIT;
	splx(s);
	proc_prepareexit(p);
	p->p_xstat = rv;

	/* task terminate will call proc_terminate and that cleans it up */
	task_terminate_internal(task);

	return(0);
}

void
proc_prepareexit(struct proc *p)
{
	struct uthread *ut;
	exception_data_t code[EXCEPTION_CODE_MAX];
	thread_t self = current_thread();

	code[0] = (exception_data_t)0xFF000001;	/* Set terminate code */
	code[1] = (exception_data_t)p->p_pid;	/* Pass out the pid */
	/* Notify the perf server */
	(void)sys_perf_notify(p->task, (exception_data_t)&code, 2);

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this on allproc
	 * in a partially cleaned state.
	 */
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	LIST_REMOVE(p, p_hash);

#ifdef PGINPROF
	vmsizmon();
#endif
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_sigignore = ~(sigcantmask);
	p->p_siglist = 0;
	ut = get_bsdthread_info(self);
	ut->uu_siglist = 0;
	untimeout(realitexpire, (caddr_t)p->p_pid);
}
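
/*
 * Illustrative sketch (hypothetical element type, not part of the
 * kernel build) of the <sys/queue.h> pattern used above to move the
 * proc off allproc and onto zombproc:
 *
 *	#include <sys/queue.h>
 *
 *	struct elem {
 *		LIST_ENTRY(elem) link;
 *	};
 *	LIST_HEAD(, elem) live = LIST_HEAD_INITIALIZER(live);
 *	LIST_HEAD(, elem) dead = LIST_HEAD_INITIALIZER(dead);
 *
 *	static void
 *	retire(struct elem *e)
 *	{
 *		LIST_REMOVE(e, link);			// off the live list
 *		LIST_INSERT_HEAD(&dead, e, link);	// onto the dead list
 *	}
 */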

void
proc_exit(struct proc *p)
{
	register struct proc *q, *nq, *pp;
	struct task *task = p->task;
	register int s;
	boolean_t funnel_state;
	/*
	 * This can happen if thread_terminate() of the single thread of
	 * this process ran without going through exit1().
	 */
280
	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	if (!(p->p_flag & P_WEXIT)) {
		s = splsched();
		p->p_flag |= P_WEXIT;
		splx(s);
		proc_prepareexit(p);
	}

	p->p_lflag |= P_LPEXIT;
	/* XXX Zombie allocation may fail, in which case stats get lost */
	MALLOC_ZONE(p->p_ru, struct rusage *,
	    sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	/*
	 * need to cancel async IO requests that can be cancelled and wait for those
	 * already active.  MAY BLOCK!
	 */

	p->p_lflag |= P_LREFDRAIN;
	while (p->p_internalref) {
		p->p_lflag |= P_LREFDRAINWAIT;
		msleep(&p->p_internalref, (lck_mtx_t *)0, 0, "proc_refdrain", 0);
	}
	p->p_lflag &= ~P_LREFDRAIN;
	p->p_lflag |= P_LREFDEAD;

	_aio_exit(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);
	/* Close ref to SYSV shared memory */
	if (p->vm_shm)
		shmexit(p);
	/* Release SYSV semaphores */
	semexit(p);

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			struct vnode *ttyvp;
			struct vfs_context context;

			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				context.vc_proc = p;
				context.vc_ucred = p->p_ucred;
				if (sp->s_ttyvp)
					VNOP_REVOKE(sp->s_ttyvp, REVOKEALL, &context);
			}
			ttyvp = sp->s_ttyvp;
			sp->s_ttyvp = NULL;
			if (ttyvp) {
				vnode_rele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	(void)acct_process(p);

#if KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vnode_put() */
	if (p->p_tracep) {
		struct vnode *tvp = p->p_tracep;
		p->p_tracep = NULL;
		vnode_rele(tvp);
	}
#endif

	while ((q = p->p_children.lh_first) != NULL) {
		proc_reparent(q, initproc);
		/*
		 * Traced processes are killed
		 * since their existence means someone is messing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {
				/*
				 * The sigwait_thread could be stopped at a
				 * breakpoint.  Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task.  So any attempts to kill
				 * the process would result in a deadlock on q->sigwait.
				 */
				thread_resume((thread_t)q->sigwait_thread);
				clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
				threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
			}
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.  If we were unable to allocate a zombie
	 * structure, this information is lost.
	 */
	if (p->p_ru != NULL) {
		*p->p_ru = p->p_stats->p_ru;

		timerclear(&p->p_ru->ru_utime);
		timerclear(&p->p_ru->ru_stime);

		if (task) {
			task_basic_info_data_t tinfo;
			task_thread_times_info_data_t ttimesinfo;
			int task_info_stuff, task_ttimes_stuff;
			struct timeval ut, st;

			task_info_stuff = TASK_BASIC_INFO_COUNT;
			task_info(task, TASK_BASIC_INFO,
			    (task_info_t)&tinfo, &task_info_stuff);
			p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
			p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
			p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
			p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

			task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
			task_info(task, TASK_THREAD_TIMES_INFO,
			    (task_info_t)&ttimesinfo, &task_ttimes_stuff);

			ut.tv_sec = ttimesinfo.user_time.seconds;
			ut.tv_usec = ttimesinfo.user_time.microseconds;
			st.tv_sec = ttimesinfo.system_time.seconds;
			st.tv_usec = ttimesinfo.system_time.microseconds;
			timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
			timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
		}

		ruadd(p->p_ru, &p->p_stats->p_cru);
	}

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree(p1, sizeof *p1);
		}
	}

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
	p->p_sigacts = NULL;

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
	p->p_limit = NULL;

	/*
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).
	 */
	p->task = TASK_NULL;
	//task->proc = NULL;
	set_bsdtask_info(task, NULL);

	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.
	 */
	if (p->p_pptr->p_flag & P_NOCLDWAIT) {
		struct proc *opp = p->p_pptr;

		/*
		 * Add child resource usage to parent before giving
		 * zombie to init.  If we were unable to allocate a
		 * zombie structure, this information is lost.
		 */
		if (p->p_ru != NULL)
			ruadd(&p->p_pptr->p_stats->p_cru, p->p_ru);

		proc_reparent(p, initproc);
		/* If there are no more children wakeup parent */
		if (LIST_EMPTY(&opp->p_children))
			wakeup((caddr_t)opp);
	}
	/* should be fine as parent proc would be initproc */
	pp = p->p_pptr;
	if (pp != initproc) {
		pp->si_pid = p->p_pid;
		pp->si_status = p->p_xstat;
		pp->si_code = CLD_EXITED;
		pp->si_uid = p->p_ucred->cr_ruid;
	}
	/* mark as a zombie */
	p->p_stat = SZOMB;

	psignal(pp, SIGCHLD);

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);

	(void) thread_funnel_set(kernel_flock, funnel_state);
}
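
/*
 * Illustrative user-space sketch (not part of the kernel build): the
 * reparenting loop above is why an orphaned child observes getppid()
 * change to pid 1 once its original parent has exited.
 *
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		if (fork() == 0) {
 *			sleep(1);	// let the parent exit first
 *			printf("ppid now %d\n", getppid());	// 1
 *			return 0;
 *		}
 *		return 0;	// parent exits; child goes to initproc
 *	}
 */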


/*
 * reap_child_process
 *
 * Description:	Given a process from which all status information needed
 *		has already been extracted, if the process is a ptrace
 *		attach process, detach it and give it back to its real
 *		parent, else recover all resources remaining associated
 *		with it.
 *
 * Parameters:	struct proc *parent	Parent of process being reaped
 *		struct proc *child	Process to reap
 *
 * Returns:	0			Process was not reaped because it
 *					came from an attach
 *		1			Process was reaped
 */
static int
reap_child_process(struct proc *parent, struct proc *child)
{
	struct proc *trace_parent;	/* Traced parent process, if tracing */
	struct vnode *tvp;		/* Traced vnode pointer, if used */

	/*
	 * If we got the child via a ptrace 'attach',
	 * we need to give it back to the old parent.
	 */
	if (child->p_oppid && (trace_parent = pfind(child->p_oppid))) {
		child->p_oppid = 0;
		proc_reparent(child, trace_parent);
		if (trace_parent != initproc) {
			trace_parent->si_pid = child->p_pid;
			trace_parent->si_status = child->p_xstat;
			trace_parent->si_code = CLD_CONTINUED;
			trace_parent->si_uid = child->p_ucred->cr_ruid;
		}
		psignal(trace_parent, SIGCHLD);
		wakeup((caddr_t)trace_parent);
		return (0);
	}
	child->p_xstat = 0;
	if (child->p_ru) {
		ruadd(&parent->p_stats->p_cru, child->p_ru);
		FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE);
		child->p_ru = NULL;
	} else {
		printf("Warning : lost p_ru for %s\n", child->p_comm);
	}

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(child->p_ucred->cr_ruid, -1);

	/*
	 * Free up credentials.
	 */
	if (child->p_ucred != NOCRED) {
		kauth_cred_t ucr = child->p_ucred;
		child->p_ucred = NOCRED;
		kauth_cred_rele(ucr);
	}

	/*
	 * Release reference to text vnode
	 */
	tvp = child->p_textvp;
	child->p_textvp = NULL;
	if (tvp) {
		vnode_rele(tvp);
	}
	/*
	 * Finally finished with old proc entry.
	 * Unlink it from its process group and free it.
	 */
	leavepgrp(child);
	LIST_REMOVE(child, p_list);	/* off zombproc */
	LIST_REMOVE(child, p_sibling);
	child->p_lflag &= ~P_LWAITING;
	wakeup(&child->p_stat);

	lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
	lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
	FREE_ZONE(child, sizeof *child, M_PROC);
	nprocs--;
	return (1);
}
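
/*
 * Illustrative user-space sketch (not part of the kernel build): the
 * ruadd() of the child's p_ru into the parent's p_cru above is what
 * getrusage(RUSAGE_CHILDREN) reports, and it only grows once a child
 * has actually been reaped.
 *
 *	#include <sys/resource.h>
 *	#include <sys/wait.h>
 *	#include <stdio.h>
 *
 *	static void
 *	show_child_usage(void)
 *	{
 *		struct rusage ru;
 *
 *		wait(NULL);	// reaping triggers the ruadd() above
 *		getrusage(RUSAGE_CHILDREN, &ru);
 *		printf("children utime: %ld s\n", (long)ru.ru_utime.tv_sec);
 *	}
 */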


int
wait1continue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;
	struct proc *p;

	if (result)
		return(result);

	p = current_proc();
	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return(wait4((struct proc *)p, (struct wait4_args *)vt, retval));
}

int
wait4(struct proc *q, struct wait4_args *uap, register_t *retval)
{
	register int nfound;
	register struct proc *p;
	int status, error;

	if (uap->pid == 0)
		uap->pid = -q->p_pgid;

loop:
	nfound = 0;
	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid &&
		    p->p_pgid != -(uap->pid))
			continue;
		nfound++;

		/* XXX This is racy because we don't get the lock!!!! */

		if (p->p_lflag & P_LWAITING) {
			(void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
			goto loop;
		}
		p->p_lflag |= P_LWAITING;	/* only allow single thread to wait() */

		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				error = copyout((caddr_t)&status,
				    uap->status,
				    sizeof(status));
				if (error) {
					p->p_lflag &= ~P_LWAITING;
					wakeup(&p->p_stat);
					return (error);
				}
			}
			if (uap->rusage) {
				if (p->p_ru == NULL) {
					error = ENOMEM;
				} else {
					if (IS_64BIT_PROCESS(q)) {
						struct user_rusage my_rusage;
						munge_rusage(p->p_ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
						    uap->rusage,
						    sizeof (my_rusage));
					} else {
						error = copyout((caddr_t)p->p_ru,
						    uap->rusage,
						    sizeof (struct rusage));
					}
				}
				/* information unavailable? */
				if (error) {
					p->p_lflag &= ~P_LWAITING;
					wakeup(&p->p_stat);
					return (error);
				}
			}

			/* Clean up */
			if (!reap_child_process(q, p)) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
			}

			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			retval[0] = p->p_pid;
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout((caddr_t)&status,
				    uap->status,
				    sizeof(status));
			} else
				error = 0;
			p->p_lflag &= ~P_LWAITING;
			wakeup(&p->p_stat);
			return (error);
		}
		p->p_lflag &= ~P_LWAITING;
		wakeup(&p->p_stat);
	}
	if (nfound == 0)
		return (ECHILD);

	if (uap->options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}

	if ((error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue)))
		return (error);

	goto loop;
}
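
/*
 * Illustrative user-space sketch (not part of the kernel build) of the
 * syscall implemented above: pid selection, non-blocking WNOHANG
 * polling, and the optional rusage copyout.
 *
 *	#include <sys/wait.h>
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	static void
 *	poll_children(void)
 *	{
 *		int status;
 *		struct rusage ru;
 *		// -1 is WAIT_ANY; WNOHANG returns 0 instead of sleeping
 *		pid_t pid = wait4(-1, &status, WNOHANG | WUNTRACED, &ru);
 *
 *		if (pid > 0 && WIFSTOPPED(status))
 *			printf("%d stopped by signal %d\n",
 *			    pid, WSTOPSIG(status));
 *	}
 */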


int
waitidcontinue(int result)
{
	void *vt;
	thread_t thread;
	int *retval;
	struct proc *p;

	if (result)
		return(result);

	p = current_proc();
	thread = current_thread();
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return(waitid((struct proc *)p, (struct waitid_args *)vt, retval));
}

/*
 * Description:	Suspend the calling thread until one child of the process
 *		containing the calling thread changes state.
 *
 * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
 *		uap->id			pid_t or gid_t or ignored
 *		uap->infop		Address of siginfo_t struct in
 *					user space into which to return status
 *		uap->options		flag values
 *
 * Returns:	0			Success
 *		!0			Error returning status to user space
 */
int
waitid(struct proc *q, struct waitid_args *uap, register_t *retval)
{
	user_siginfo_t collect64;	/* siginfo data to return to caller */

	register int nfound;
	register struct proc *p;
	int error;

loop:
	nfound = 0;
	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		switch (uap->idtype) {
		case P_PID:	/* child with process ID equal to... */
			if (p->p_pid != (pid_t)uap->id)
				continue;
			break;
		case P_PGID:	/* child with process group ID equal to... */
			if (p->p_pgid != (pid_t)uap->id)
				continue;
			break;
		case P_ALL:	/* any child */
			break;
		}

		/* XXX This is racy because we don't get the lock!!!! */

		/*
		 * Wait collision; go to sleep and restart; used to maintain
		 * the single return for waited process guarantee.
		 */
		if (p->p_lflag & P_LWAITING) {
			(void)tsleep(&p->p_stat, PWAIT, "waitidcoll", 0);
			goto loop;
		}
		p->p_lflag |= P_LWAITING;	/* mark busy */

		nfound++;

		/*
		 * Types of processes we are interested in
		 *
		 * XXX Don't know what to do for WCONTINUED?!?
		 */
		switch (p->p_stat) {
		case SZOMB:	/* Exited */
			if (!(uap->options & WEXITED))
				break;

			/* Collect "siginfo" information for caller */
			collect64.si_signo = 0;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr = 0;
			collect64.si_status = p->p_xstat;
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				error = copyout((caddr_t)&collect64,
				    uap->infop,
				    sizeof(collect64));
			} else {
				siginfo_t collect;
				siginfo_64to32(&collect64, &collect);
				error = copyout((caddr_t)&collect,
				    uap->infop,
				    sizeof(collect));
			}
			/* information unavailable? */
			if (error) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
				return (error);
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				/* Clean up */
				if (!reap_child_process(q, p)) {
					p->p_lflag &= ~P_LWAITING;
					wakeup(&p->p_stat);
				}
			}

			return (0);

		case SSTOP:	/* Stopped */
			/*
			 * If we are not interested in stopped processes, then
			 * ignore this one.
			 */
			if (!(uap->options & WSTOPPED))
				break;

			/*
			 * If someone has already waited for it, we lost a
			 * race to be the one to return status.
			 */
			if ((p->p_flag & P_WAITED) != 0)
				break;

			/*
			 * If this is not a traced process, and they haven't
			 * indicated an interest in untraced processes, then
			 * ignore this one.
			 */
			if (!(p->p_flag & P_TRACED) && !(uap->options & WUNTRACED))
				break;

			/* Collect "siginfo" information for caller */
			collect64.si_signo = 0;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr = 0;
			collect64.si_status = p->p_xstat;
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				error = copyout((caddr_t)&collect64,
				    uap->infop,
				    sizeof(collect64));
			} else {
				siginfo_t collect;
				siginfo_64to32(&collect64, &collect);
				error = copyout((caddr_t)&collect,
				    uap->infop,
				    sizeof(collect));
			}
			/* information unavailable? */
			if (error) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
				return (error);
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				p->p_flag |= P_WAITED;
			}

			p->p_lflag &= ~P_LWAITING;
			wakeup(&p->p_stat);
			return (0);

		default:	/* All others */
			/* ...meaning Continued */
			if (!(uap->options & WCONTINUED))
				break;

			/*
			 * If the flag isn't set, then this process has not
			 * been stopped and continued, or the status has
			 * already been reaped by another caller of waitid().
			 */
			if ((p->p_flag & P_CONTINUED) == 0)
				break;

			/* Collect "siginfo" information for caller */
			collect64.si_signo = 0;
			collect64.si_code = 0;
			collect64.si_errno = 0;
			collect64.si_pid = 0;
			collect64.si_uid = 0;
			collect64.si_addr = 0;
			collect64.si_status = p->p_xstat;
			collect64.si_band = 0;

			if (IS_64BIT_PROCESS(p)) {
				error = copyout((caddr_t)&collect64,
				    uap->infop,
				    sizeof(collect64));
			} else {
				siginfo_t collect;
				siginfo_64to32(&collect64, &collect);
				error = copyout((caddr_t)&collect,
				    uap->infop,
				    sizeof(collect));
			}
			/* information unavailable? */
			if (error) {
				p->p_lflag &= ~P_LWAITING;
				wakeup(&p->p_stat);
				return (error);
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				p->p_flag &= ~P_CONTINUED;
			}

			p->p_lflag &= ~P_LWAITING;
			wakeup(&p->p_stat);
			return (0);
		}


		/* Not a process we are interested in; go on to next child */
		p->p_lflag &= ~P_LWAITING;
		wakeup(&p->p_stat);
	}

	/* No child processes that could possibly satisfy the request? */
	if (nfound == 0)
		return (ECHILD);

	if (uap->options & WNOHANG) {
		retval[0] = 0;
		return (0);
	}

	if ((error = tsleep0((caddr_t)q, PWAIT | PCATCH, "waitid", 0, waitidcontinue)))
		return (error);

	goto loop;
}
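
/*
 * Illustrative user-space sketch (not part of the kernel build) of the
 * syscall implemented above: WNOWAIT peeks at the status without
 * reaping, so a later call can still collect the child.
 *
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void
 *	peek_then_reap(pid_t child)
 *	{
 *		siginfo_t si;
 *
 *		if (waitid(P_PID, child, &si, WEXITED | WNOWAIT) == 0)
 *			printf("status %d\n", si.si_status);	// p_xstat above
 *		waitid(P_PID, child, &si, WEXITED);	// now reap for real
 *	}
 */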

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;

	if (initproc == parent && child->p_stat == SZOMB)
		psignal(initproc, SIGCHLD);
}
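
/*
 * Illustrative user-space sketch (not part of the kernel build): the
 * classic double-fork daemonization leans on proc_reparent() above:
 * the grandchild is orphaned on purpose so that init adopts it and
 * later reaps it, leaving no zombie behind.
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	static void
 *	daemonize(void)
 *	{
 *		if (fork() > 0)
 *			exit(0);	// parent exits; child reparented
 *		setsid();		// new session, no controlling tty
 *		if (fork() > 0)
 *			exit(0);	// grandchild now belongs to init
 *		// ... daemon work continues here ...
 *	}
 */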

/*
 * Make the current process an "init" process, meaning
 * that it doesn't have a parent, and that it won't be
 * gunned down by kill(-1, 0).
 */
kern_return_t
init_process(__unused struct init_process_args *args)
{
	register struct proc *p = current_proc();

	AUDIT_MACH_SYSCALL_ENTER(AUE_INITPROCESS);
	if (suser(kauth_cred_get(), &p->p_acflag)) {
		AUDIT_MACH_SYSCALL_EXIT(KERN_NO_ACCESS);
		return(KERN_NO_ACCESS);
	}

	if (p->p_pid != 1 && p->p_pgid != p->p_pid)
		enterpgrp(p, p->p_pid, 0);
	p->p_flag |= P_SYSTEM;

	/*
	 * Take us out of the sibling chain, and
	 * out of our parent's child chain.
	 */
	LIST_REMOVE(p, p_sibling);
	p->p_sibling.le_prev = NULL;
	p->p_sibling.le_next = NULL;
	p->p_pptr = kernproc;

	AUDIT_MACH_SYSCALL_EXIT(KERN_SUCCESS);
	return(KERN_SUCCESS);
}


/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */

void
vfork_exit(struct proc *p, int rv)
{
	thread_t self = current_thread();
#ifdef FIXME
	struct task *task = p->task;
#endif
	register int s;
	struct uthread *ut;
	exception_data_t code[EXCEPTION_CODE_MAX];

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(self);
#ifdef FIXME
	signal_lock(p);
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(self) != task) {
				signal_unlock(p);
				return;
			}
			signal_unlock(p);
			thread_terminate(self);
			thread_funnel_set(kernel_flock, FALSE);
			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	signal_unlock(p);
	if (p->p_pid == 1) {
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data);
	}
#endif /* FIXME */

	s = splsched();
	p->p_flag |= P_WEXIT;
	p->p_lflag |= P_LPEXIT;
	splx(s);

	code[0] = (exception_data_t)0xFF000001;	/* Set terminate code */
	code[1] = (exception_data_t)p->p_pid;	/* Pass out the pid */
	/* Notify the perf server */
	(void)sys_perf_notify(p->task, (exception_data_t)&code, 2);

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this on allproc
	 * in a partially cleaned state.
	 */
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);	/* Place onto zombproc. */
	LIST_REMOVE(p, p_hash);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_sigignore = ~0;
	p->p_siglist = 0;

	ut->uu_siglist = 0;
	untimeout(realitexpire, (caddr_t)p->p_pid);

	p->p_xstat = rv;

	vproc_exit(p);
}
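
/*
 * Illustrative user-space sketch (not part of the kernel build): the
 * vfork path above runs while the child still borrows the parent's
 * address space, which is why a vfork child may only _exit() or exec.
 *
 *	#include <unistd.h>
 *
 *	static void
 *	spawn(char *const argv[])
 *	{
 *		pid_t pid = vfork();	// parent blocks until child exits or execs
 *
 *		if (pid == 0) {
 *			execv(argv[0], argv);
 *			_exit(127);	// exec failed: takes the path above
 *		}
 *	}
 */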

void
vproc_exit(struct proc *p)
{
	register struct proc *q, *nq, *pp;
#ifdef FIXME
	struct task *task = p->task;
#endif

	/* XXX Zombie allocation may fail, in which case stats get lost */
	MALLOC_ZONE(p->p_ru, struct rusage *,
	    sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			struct vnode *ttyvp;
			struct vfs_context context;

			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				context.vc_proc = p;
				context.vc_ucred = p->p_ucred;
				if (sp->s_ttyvp)
					VNOP_REVOKE(sp->s_ttyvp, REVOKEALL, &context);
			}
			ttyvp = sp->s_ttyvp;
			sp->s_ttyvp = NULL;
			if (ttyvp) {
				vnode_rele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

#if KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vnode_rele() */
	if (p->p_tracep) {
		struct vnode *tvp = p->p_tracep;
		p->p_tracep = NULL;
		vnode_rele(tvp);
	}
#endif

	while ((q = p->p_children.lh_first) != NULL) {
		proc_reparent(q, initproc);
		/*
		 * Traced processes are killed
		 * since their existence means someone is messing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {
				/*
				 * The sigwait_thread could be stopped at a
				 * breakpoint.  Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task.  So any attempts to kill
				 * the process would result in a deadlock on q->sigwait.
				 */
				thread_resume((thread_t)q->sigwait_thread);
				clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
				threadsignal((thread_t)q->sigwait_thread, SIGKILL, 0);
			}
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.  If we were unable to allocate a zombie
	 * structure, this information is lost.
	 */
	if (p->p_ru != NULL) {
		*p->p_ru = p->p_stats->p_ru;
		timerclear(&p->p_ru->ru_utime);
		timerclear(&p->p_ru->ru_stime);

#ifdef FIXME
		if (task) {
			task_basic_info_data_t tinfo;
			task_thread_times_info_data_t ttimesinfo;
			int task_info_stuff, task_ttimes_stuff;
			struct timeval ut, st;

			task_info_stuff = TASK_BASIC_INFO_COUNT;
			task_info(task, TASK_BASIC_INFO,
			    &tinfo, &task_info_stuff);
			p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
			p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
			p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
			p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

			task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
			task_info(task, TASK_THREAD_TIMES_INFO,
			    &ttimesinfo, &task_ttimes_stuff);

			ut.tv_sec = ttimesinfo.user_time.seconds;
			ut.tv_usec = ttimesinfo.user_time.microseconds;
			st.tv_sec = ttimesinfo.system_time.seconds;
			st.tv_usec = ttimesinfo.system_time.microseconds;
			timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
			timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
		}
#endif /* FIXME */

		ruadd(p->p_ru, &p->p_stats->p_cru);
	}

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree(p1, sizeof *p1);
		}
	}

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
	p->p_sigacts = NULL;

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
	p->p_limit = NULL;

	/*
	 * Finish up by terminating the task
	 * and halt this thread (only if a
	 * member of the task exiting).
	 */
	p->task = TASK_NULL;

	/*
	 * Notify parent that we're gone.
	 */
	pp = p->p_pptr;
	if (pp != initproc) {
		pp->si_pid = p->p_pid;
		pp->si_status = p->p_xstat;
		pp->si_code = CLD_EXITED;
		pp->si_uid = p->p_ucred->cr_ruid;
	}
	/* mark as a zombie */
	p->p_stat = SZOMB;

	psignal(p->p_pptr, SIGCHLD);

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);
}


/*
 * munge_rusage
 *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
 *	process.  We munge the kernel (32 bit) version of rusage into the
 *	64 bit version.
 */
__private_extern__ void
munge_rusage(struct rusage *a_rusage_p, struct user_rusage *a_user_rusage_p)
{
	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * everything else can be a direct assign, since there is no loss
	 * of precision implied going from 32 to 64 bits.
	 */
	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}
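
/*
 * Illustrative sketch (hypothetical types, not part of the kernel
 * build) of why the timevals above need member-wise copies: the struct
 * layout changes when long widens from 32 to 64 bits, so copying the
 * whole rusage as raw bytes would misalign every field after the two
 * timevals.
 *
 *	#include <stdint.h>
 *
 *	struct timeval32 { int32_t tv_sec; int32_t tv_usec; };
 *	struct timeval64 { int64_t tv_sec; int64_t tv_usec; };
 *
 *	static void
 *	widen(const struct timeval32 *in, struct timeval64 *out)
 *	{
 *		out->tv_sec = in->tv_sec;	// sign-extends 32 -> 64
 *		out->tv_usec = in->tv_usec;
 *	}
 */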