/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <machine/reg.h>
#include <machine/psl.h>

#include "compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/assert.h>

extern char init_task_failure_data[];
int exit1 __P((struct proc *, int, int *));

/*
 * exit --
 *	Death of process.
 */
struct exit_args {
	int	rval;
};
void
exit(p, uap, retval)
	struct proc *p;
	struct exit_args *uap;
	int *retval;
{
	exit1(p, W_EXITCODE(uap->rval, 0), retval);

	/* drop funnel before we return */
	thread_funnel_set(kernel_flock, FALSE);
	thread_exception_return();
	/* NOTREACHED */
	while (TRUE)
		thread_block(0);
	/* NOTREACHED */
}
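/*
 * Illustrative sketch (not compiled): exit1() is handed a wait(2) status
 * word built with W_EXITCODE() above.  Assuming the classic BSD encoding
 * from <sys/wait.h>, the low 7 bits carry the terminating signal and the
 * next 8 bits carry the exit code, which is what the WTERMSIG()/
 * WEXITSTATUS() uses in exit1() and the W_STOPCODE() uses in wait1()
 * below rely on.
 */
#if 0
static void
wait_status_example(void)
{
	int status;

	status = W_EXITCODE(2, 0);		/* normal exit, code 2 */
	assert(WIFEXITED(status) && WEXITSTATUS(status) == 2);

	status = W_EXITCODE(0, SIGKILL);	/* terminated by a signal */
	assert(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL);

	status = W_STOPCODE(SIGTSTP);		/* stopped by SIGTSTP */
	assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTSTP);
}
#endif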

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
int
exit1(p, rv, retval)
	register struct proc *p;
	int rv;
	int *retval;
{
	register struct proc *q, *nq;
	thread_t self = current_thread();
	thread_act_t th_act_self = current_act();
	struct task *task = p->task;
	register int i,s;
	struct uthread *ut;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(th_act_self);
	if (ut->uu_flag & P_VFORK) {
		vfork_exit(p, rv);
		vfork_return(th_act_self, p->p_pptr, p, retval);
		unix_syscall_return(0);
		/* NOT REACHED */
	}
	signal_lock(p);
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(th_act_self) != task) {
				signal_unlock(p);
				return(0);
			}
			signal_unlock(p);
			thread_terminate(th_act_self);
			thread_funnel_set(kernel_flock, FALSE);
			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	signal_unlock(p);
	if (p->p_pid == 1) {
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s",
		    init_task_failure_data);
	}

	s = splsched();
	p->p_flag |= P_WEXIT;
	splx(s);
	proc_prepareexit(p);
	p->p_xstat = rv;

	/* task terminate will call proc_terminate and that cleans it up */
	task_terminate_internal(task);

	/*
	 * we come back and return to the AST, which
	 * should clean up the rest
	 */
#if 0
	if (task == current_task()) {
		thread_exception_return();
		/*NOTREACHED*/
	}

	while (task == current_task()) {
		thread_terminate_self();
		/*NOTREACHED*/
	}
#endif
	return(0);
}
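/*
 * Descriptive note: exit1() performs the BSD-side preparation
 * (proc_prepareexit(), stashing the status in p_xstat) and then hands the
 * Mach task to task_terminate_internal().  The remaining BSD-level
 * teardown (proc_exit() below) is apparently driven later from the Mach
 * side, once termination unwinds through the AST path referred to in the
 * comment above.
 */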

void
proc_prepareexit(struct proc *p)
{
	int s;
	struct uthread *ut;
	thread_t self = current_thread();
	thread_act_t th_act_self = current_act();

	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this proc
	 * on allproc in a partially cleaned state.
	 */
	LIST_REMOVE(p, p_list);
	LIST_REMOVE(p, p_hash);

#ifdef PGINPROF
	vmsizmon();
#endif
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_sigignore = ~0;
	p->p_siglist = 0;
	ut = get_bsdthread_info(th_act_self);
	ut->uu_sig = 0;
	untimeout(realitexpire, (caddr_t)p);
}
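/*
 * Illustrative sketch (not compiled): what the LIST_REMOVE() calls above
 * amount to, assuming the standard <sys/queue.h> list macros.  Each proc
 * carries a forward pointer and a pointer to the previous element's
 * forward pointer, so unlinking never needs to walk the list.
 */
#if 0
#define EXAMPLE_LIST_REMOVE(elm, field) do {				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev =			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
} while (0)
#endif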

void
proc_exit(struct proc *p)
{
	register struct proc *q, *nq;
	thread_t self = current_thread();
	thread_act_t th_act_self = current_act();
	struct task *task = p->task;
	register int i,s;
	struct uthread *ut;
	boolean_t funnel_state;

	/*
	 * This can happen if thread_terminate() was called on the single
	 * thread of the process, in which case P_WEXIT is not yet set and
	 * we do the exit preparation here.
	 */

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	if (!(p->p_flag & P_WEXIT)) {
		s = splsched();
		p->p_flag |= P_WEXIT;
		splx(s);
		proc_prepareexit(p);
	}

	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	/* Close ref SYSV Shared memory */
	if (p->vm_shm)
		shmexit(p);

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp)
					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
			}
			if (sp->s_ttyvp)
				vrele(sp->s_ttyvp);
			sp->s_ttyvp = NULL;
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
#if KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vrele() */
	if (p->p_tracep)
		vrele(p->p_tracep);
#endif

	q = p->p_children.lh_first;
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = q->p_sibling.le_next;
		proc_reparent(q, initproc);
		/*
		 * Traced processes are killed
		 * since their existence means someone is messing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {
				thread_t sig_shuttle = getshuttle_thread(q->sigwait_thread);
				/*
				 * The sigwait_thread could be stopped at a
				 * breakpoint.  Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task.  So any attempt to kill
				 * the process would result in a deadlock on q->sigwait.
				 */
				thread_resume((struct thread *)q->sigwait_thread);
				clear_wait(sig_shuttle, THREAD_INTERRUPTED);
				threadsignal(q->sigwait_thread, SIGKILL, 0);
			}
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	*p->p_ru = p->p_stats->p_ru;

	timerclear(&p->p_ru->ru_utime);
	timerclear(&p->p_ru->ru_stime);

	if (task) {
		task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		int task_info_stuff, task_ttimes_stuff;
		struct timeval ut,st;

		task_info_stuff = TASK_BASIC_INFO_COUNT;
		task_info(task, TASK_BASIC_INFO,
			  &tinfo, &task_info_stuff);
		p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
		p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
		p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
		p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

		task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
			  &ttimesinfo, &task_ttimes_stuff);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
		timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
	}

	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree((vm_offset_t)p1, sizeof *p1);
		}
	}

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
	p->p_sigacts = NULL;

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
	p->p_limit = NULL;

	/*
	 * Finish up by terminating the task
	 * and halting this thread (only if it is a
	 * member of the exiting task).
	 */
	p->task = TASK_NULL;
	//task->proc = NULL;
	set_bsdtask_info(task, NULL);

	/*
	 * Notify parent that we're gone.
	 */
	psignal(p->p_pptr, SIGCHLD);

	/* Place onto zombproc. */
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	p->p_stat = SZOMB;

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);

	(void) thread_funnel_set(kernel_flock, funnel_state);
}
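/*
 * Illustrative sketch (not compiled): the rusage accounting in proc_exit()
 * above converts Mach time_value_t values (seconds/microseconds) reported
 * by task_info() into BSD struct timeval and sums them.  Assuming
 * timeradd() has the usual <sys/time.h> semantics, the equivalent
 * open-coded addition looks like this.
 */
#if 0
static void
example_timeval_add(struct timeval *a, struct timeval *b, struct timeval *res)
{
	/* add componentwise, then normalize the microsecond carry */
	res->tv_sec = a->tv_sec + b->tv_sec;
	res->tv_usec = a->tv_usec + b->tv_usec;
	if (res->tv_usec >= 1000000) {
		res->tv_sec++;
		res->tv_usec -= 1000000;
	}
}
#endif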

struct wait4_args {
	int	pid;
	int	*status;
	int	options;
	struct rusage *rusage;
};

#if COMPAT_43
int
owait(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{
	struct wait4_args *a;

	a = (struct wait4_args *)get_bsduthreadarg(current_act());

	a->options = 0;
	a->rusage = NULL;
	a->pid = WAIT_ANY;
	a->status = NULL;
	return (wait1(p, a, retval, 1));
}

int
wait4(p, uap, retval)
	struct proc *p;
	struct wait4_args *uap;
	int *retval;
{
	return (wait1(p, uap, retval, 0));
}

struct owait3_args {
	int	*status;
	int	options;
	struct rusage *rusage;
};

int
owait3(p, uap, retval)
	struct proc *p;
	struct owait3_args *uap;
	int *retval;
{
	struct wait4_args *a;

	a = (struct wait4_args *)get_bsduthreadarg(current_act());

	a->rusage = uap->rusage;
	a->options = uap->options;
	a->status = uap->status;
	a->pid = WAIT_ANY;

	return (wait1(p, a, retval, 1));
}

#else
#define	wait1	wait4
#endif

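/*
 * wait1continue() is the continuation handed to tsleep0() by wait1() below:
 * when the sleeping parent is woken, execution apparently resumes here
 * rather than in the middle of wait1(), so the saved syscall argument
 * block and return-value area are re-fetched from the current thread and
 * wait1() is simply restarted.
 */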
int
wait1continue(result)
	int result;
{
	void *vt;
	thread_act_t thread;
	struct uthread *ut;
	int *retval;
	struct proc *p;

	p = current_proc();
	p->p_flag &= ~P_WAITING;

	if (result != 0) {
		return(result);
	}

	thread = current_act();
	ut = get_bsdthread_info(thread);
	vt = get_bsduthreadarg(thread);
	retval = get_bsduthreadrval(thread);
	return (wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0));
}

int
wait1(q, uap, retval, compat)
	register struct proc *q;
	register struct wait4_args *uap;
	register_t *retval;
#if COMPAT_43
	int compat;
#endif
{
	register int nfound;
	register struct proc *p, *t;
	int status, error;

#if 0
	/* since we are funneled we don't need to do this atomically, yet */
	if (q->p_flag & P_WAITING) {
		return(EINVAL);
	}
	q->p_flag |= P_WAITING;		/* only allow single thread to wait() */
#endif

	if (uap->pid == 0)
		uap->pid = -q->p_pgid;

loop:
	nfound = 0;
	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid &&
		    p->p_pgid != -(uap->pid))
			continue;
		nfound++;
		if (p->p_stat == SZOMB) {
			retval[0] = p->p_pid;
#if COMPAT_43
			if (compat)
				retval[1] = p->p_xstat;
			else
#endif
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				if (error = copyout((caddr_t)&status,
				    (caddr_t)uap->status,
				    sizeof(status))) {
					q->p_flag &= ~P_WAITING;
					return (error);
				}
			}
			if (uap->rusage &&
			    (error = copyout((caddr_t)p->p_ru,
			    (caddr_t)uap->rusage,
			    sizeof (struct rusage)))) {
				q->p_flag &= ~P_WAITING;
				return (error);
			}
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				psignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				q->p_flag &= ~P_WAITING;
				return (0);
			}
			p->p_xstat = 0;
			if (p->p_ru) {
				ruadd(&q->p_stats->p_cru, p->p_ru);
				FREE_ZONE(p->p_ru, sizeof *p->p_ru, M_ZOMBIE);
				p->p_ru = NULL;
			} else {
				printf("Warning : lost p_ru for %s\n", p->p_comm);
			}

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_cred->p_ruid, -1);

			/*
			 * Free up credentials.
			 */
			if (--p->p_cred->p_refcnt == 0) {
				struct ucred *ucr = p->p_ucred;
				struct pcred *pcr;

				if (ucr != NOCRED) {
					p->p_ucred = NOCRED;
					crfree(ucr);
				}
				pcr = p->p_cred;
				p->p_cred = NULL;
				FREE_ZONE(pcr, sizeof *pcr, M_SUBPROC);
			}

			/*
			 * Release reference to text vnode
			 */
			if (p->p_textvp)
				vrele(p->p_textvp);

			/*
			 * Finally finished with old proc entry.
			 * Unlink it from its process group and free it.
			 */
			leavepgrp(p);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			LIST_REMOVE(p, p_sibling);
			FREE_ZONE(p, sizeof *p, M_PROC);
			nprocs--;
			q->p_flag &= ~P_WAITING;
			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			retval[0] = p->p_pid;
#if COMPAT_43
			if (compat) {
				retval[1] = W_STOPCODE(p->p_xstat);
				error = 0;
			} else
#endif
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout((caddr_t)&status,
				    (caddr_t)uap->status,
				    sizeof(status));
			} else
				error = 0;
			q->p_flag &= ~P_WAITING;
			return (error);
		}
	}
	if (nfound == 0) {
		q->p_flag &= ~P_WAITING;
		return (ECHILD);
	}
	if (uap->options & WNOHANG) {
		retval[0] = 0;
		q->p_flag &= ~P_WAITING;
		return (0);
	}

	if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue)) {
		q->p_flag &= ~P_WAITING;
		return (error);
	}
	goto loop;
}
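/*
 * Illustrative user-level sketch (not part of the kernel, not compiled):
 * how a caller of the wait4() system call implemented above might collect
 * a child's status and resource usage, decoding the status word with the
 * standard <sys/wait.h> macros.
 */
#if 0
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <stdio.h>

static void
reap_children(void)
{
	int status;
	struct rusage ru;
	pid_t pid;

	/* WNOHANG: return 0 instead of sleeping when no child is ready */
	while ((pid = wait4(WAIT_ANY, &status, WNOHANG, &ru)) > 0) {
		if (WIFEXITED(status))
			printf("%d exited with code %d\n", pid, WEXITSTATUS(status));
		else if (WIFSIGNALED(status))
			printf("%d killed by signal %d\n", pid, WTERMSIG(status));
		else if (WIFSTOPPED(status))
			printf("%d stopped by signal %d\n", pid, WSTOPSIG(status));
	}
}
#endif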

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(child, parent)
	register struct proc *child;
	register struct proc *parent;
{

	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}
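/*
 * proc_reparent() is used by the exit paths in this file (proc_exit()
 * above and vproc_exit() below) to hand orphaned children over to
 * initproc, and by wait1() to give a child picked up via a ptrace attach
 * back to its original parent (p_oppid).
 */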

kern_return_t
init_process(void)
/*
 * Make the current process an "init" process, meaning
 * that it doesn't have a parent, and that it won't be
 * gunned down by kill(-1, 0).
 */
{
	register struct proc *p = current_proc();

	if (suser(p->p_ucred, &p->p_acflag))
		return(KERN_NO_ACCESS);

	if (p->p_pid != 1 && p->p_pgid != p->p_pid)
		enterpgrp(p, p->p_pid, 0);
	p->p_flag |= P_SYSTEM;

	/*
	 * Take us out of the sibling chain, and
	 * out of our parent's child chain.
	 */
	LIST_REMOVE(p, p_sibling);
	p->p_sibling.le_prev = NULL;
	p->p_sibling.le_next = NULL;
	p->p_pptr = kernproc;

	return(KERN_SUCCESS);
}

void
process_terminate_self(void)
{
	struct proc *p = current_proc();

	if (p != NULL) {
		exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
		/*NOTREACHED*/
	}
}
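/*
 * Descriptive note: vfork_exit()/vproc_exit() below mirror
 * exit1()/proc_exit() for a child created by vfork() that is still
 * borrowing its parent's resources.  exit1() dispatches here when the
 * calling uthread has P_VFORK set and resumes the parent via
 * vfork_return(); unlike proc_exit(), vproc_exit() does not call
 * task_terminate_internal() or set_bsdtask_info(task, NULL).
 */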
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */

void
vfork_exit(p, rv)
	register struct proc *p;
	int rv;
{
	register struct proc *q, *nq;
	thread_t self = current_thread();
	thread_act_t th_act_self = current_act();
	struct task *task = p->task;
	register int i,s;
	struct uthread *ut;

	/*
	 * If a thread in this task has already
	 * called exit(), then halt any others
	 * right here.
	 */

	ut = get_bsdthread_info(th_act_self);
#ifdef FIXME
	signal_lock(p);
	while (p->exit_thread != self) {
		if (sig_try_locked(p) <= 0) {
			if (get_threadtask(th_act_self) != task) {
				signal_unlock(p);
				return;
			}
			signal_unlock(p);
			thread_terminate(th_act_self);
			thread_funnel_set(kernel_flock, FALSE);
			thread_exception_return();
			/* NOTREACHED */
		}
		sig_lock_to_exit(p);
	}
	signal_unlock(p);
	if (p->p_pid == 1) {
		printf("pid 1 exited (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data);
	}
#endif /* FIXME */

	s = splsched();
	p->p_flag |= P_WEXIT;
	splx(s);
	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this proc
	 * on allproc in a partially cleaned state.
	 */
	LIST_REMOVE(p, p_list);
	LIST_REMOVE(p, p_hash);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_sigignore = ~0;
	p->p_siglist = 0;

	ut->uu_sig = 0;
	untimeout(realitexpire, (caddr_t)p);

	p->p_xstat = rv;

	vproc_exit(p);
}

void
vproc_exit(struct proc *p)
{
	register struct proc *q, *nq;
	thread_t self = current_thread();
	thread_act_t th_act_self = current_act();
	struct task *task = p->task;
	register int i,s;
	struct uthread *ut;
	boolean_t funnel_state;

	MALLOC_ZONE(p->p_ru, struct rusage *,
			sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	/* Close ref SYSV Shared memory */
	if (p->vm_shm)
		shmexit(p);

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp->t_session == sp) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp)
					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
			}
			if (sp->s_ttyvp)
				vrele(sp->s_ttyvp);
			sp->s_ttyvp = NULL;
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}

	fixjobc(p, p->p_pgrp, 0);
	p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
#if KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vrele() */
	if (p->p_tracep)
		vrele(p->p_tracep);
#endif

	q = p->p_children.lh_first;
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = q->p_sibling.le_next;
		proc_reparent(q, initproc);
		/*
		 * Traced processes are killed
		 * since their existence means someone is messing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			if (q->sigwait_thread) {
				thread_t sig_shuttle = getshuttle_thread(q->sigwait_thread);
				/*
				 * The sigwait_thread could be stopped at a
				 * breakpoint.  Wake it up to kill.
				 * Need to do this as it could be a thread which is not
				 * the first thread in the task.  So any attempt to kill
				 * the process would result in a deadlock on q->sigwait.
				 */
				thread_resume((struct thread *)q->sigwait_thread);
				clear_wait(sig_shuttle, THREAD_INTERRUPTED);
				threadsignal(q->sigwait_thread, SIGKILL, 0);
			}
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	*p->p_ru = p->p_stats->p_ru;

	timerclear(&p->p_ru->ru_utime);
	timerclear(&p->p_ru->ru_stime);

#ifdef FIXME
	if (task) {
		task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		int task_info_stuff, task_ttimes_stuff;
		struct timeval ut,st;

		task_info_stuff = TASK_BASIC_INFO_COUNT;
		task_info(task, TASK_BASIC_INFO,
			  &tinfo, &task_info_stuff);
		p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
		p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
		p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
		p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;

		task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
			  &ttimesinfo, &task_ttimes_stuff);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, &p->p_ru->ru_utime, &p->p_ru->ru_utime);
		timeradd(&st, &p->p_ru->ru_stime, &p->p_ru->ru_stime);
	}
#endif /* FIXME */

	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * Free up profiling buffers.
	 */
	{
		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;

		p1 = p0->pr_next;
		p0->pr_next = NULL;
		p0->pr_scale = 0;

		for (; p1 != NULL; p1 = pn) {
			pn = p1->pr_next;
			kfree((vm_offset_t)p1, sizeof *p1);
		}
	}

	/*
	 * Other substructures are freed from wait().
	 */
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
	p->p_stats = NULL;

	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
	p->p_sigacts = NULL;

	if (--p->p_limit->p_refcnt == 0)
		FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
	p->p_limit = NULL;

	/*
	 * Finish up by terminating the task
	 * and halting this thread (only if it is a
	 * member of the exiting task).
	 */
	p->task = TASK_NULL;

	/*
	 * Notify parent that we're gone.
	 */
	psignal(p->p_pptr, SIGCHLD);

	/* Place onto zombproc. */
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	p->p_stat = SZOMB;

	/* and now wakeup the parent */
	wakeup((caddr_t)p->p_pptr);
}