/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#if KTRACE
#include <sys/ktrace.h>
#endif

#include <mach/mach_types.h>
#include <kern/mach_param.h>

#include <machine/spl.h>
thread_act_t cloneproc(struct proc *, int);
struct proc *forkproc(struct proc *, int);
thread_act_t procdup(struct proc *child, struct proc *parent);

#define	DOFORK	0x1	/* fork() system call */
#define	DOVFORK	0x2	/* vfork() system call */
static int fork1(struct proc *, long, register_t *);

/*
 * fork system call.
 */
int
fork(p, uap, retval)
	struct proc *p;
	void *uap;
	register_t *retval;
{
	return (fork1(p, (long)DOFORK, retval));
}
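
/*
 * Both fork() and vfork() report their results through the two-element
 * retval array: retval[0] carries the pid and retval[1] distinguishes
 * the two returns (0 in the parent, 1 in the child).  The machine-
 * dependent trap return code is assumed to map these values onto the
 * registers userland expects from fork().
 */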

/*
 * vfork system call
 */
int
vfork(p, uap, retval)
	struct proc *p;
	void *uap;
	register_t *retval;
{
	register struct proc *newproc;
	register uid_t uid;
	thread_act_t cur_act = (thread_act_t)current_act();
	int count;
	task_t t;
	uthread_t ut;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p->p_cred->p_ruid;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		tablefull("proc");
		retval[1] = 0;
		return (EAGAIN);
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		return (EAGAIN);
	}
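	/*
	 * chgproccnt() returns the updated per-uid count, so the new
	 * process is charged before the RLIMIT_NPROC check and the
	 * charge is rolled back (the -1 adjustment above) on failure.
	 */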

	ut = (struct uthread *)get_bsdthread_info(cur_act);
	if (ut->uu_flag & P_VFORK) {
		printf("vfork called recursively by %s\n", p->p_comm);
		return (EINVAL);
	}
	p->p_flag |= P_VFORK;
	p->p_vforkcnt++;

	/* The newly created process comes with signal lock held */
	newproc = (struct proc *)forkproc(p, 1);

	LIST_INSERT_AFTER(p, newproc, p_pglist);
	newproc->p_pptr = p;
	newproc->task = p->task;
	LIST_INSERT_HEAD(&p->p_children, newproc, p_sibling);
	LIST_INIT(&newproc->p_children);
	LIST_INSERT_HEAD(&allproc, newproc, p_list);
	LIST_INSERT_HEAD(PIDHASH(newproc->p_pid), newproc, p_hash);
	TAILQ_INIT(&newproc->p_evlist);
	newproc->p_stat = SRUN;
	newproc->p_flag |= P_INVFORK;
	newproc->p_vforkact = cur_act;

	ut->uu_flag |= P_VFORK;
	ut->uu_proc = newproc;
	ut->uu_userstate = (void *)act_thread_csave();
	ut->uu_vforkmask = ut->uu_sigmask;

	thread_set_child(cur_act, newproc->p_pid);

	newproc->p_stats->p_start = time;
	newproc->p_acflag = AFORK;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	newproc->p_flag |= P_PPWAIT;

	/* drop the signal lock on the child */
	signal_unlock(newproc);

	retval[0] = newproc->p_pid;
	retval[1] = 1;		/* mark child */

	return (0);
}
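
/*
 * Note that vfork() creates no new Mach task or thread: the child proc
 * borrows the parent's task (newproc->task = p->task above), the
 * parent's activation is recorded in p_vforkact, and the parent thread
 * runs "as" the child (uu_proc) until the child execs or exits, at
 * which point vfork_return() below restores the parent's saved state.
 */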

/*
 * Return to the parent from vfork: called once the child side has
 * exec'd or exited, to restore the parent thread's saved state.
 */
void
vfork_return(th_act, p, p2, retval)
	thread_act_t th_act;
	struct proc *p;
	struct proc *p2;
	register_t *retval;
{
	long flags;
	register uid_t uid;
	thread_t newth, self = current_thread();
	thread_act_t cur_act = (thread_act_t)current_act();
	int s, count;
	task_t t;
	uthread_t ut;

	ut = (struct uthread *)get_bsdthread_info(cur_act);

	act_thread_catt(ut->uu_userstate);

	/* Decrement the outstanding vfork count; clear P_VFORK on the last one */
	p->p_vforkcnt--;
	if (p->p_vforkcnt < 0)
		panic("vfork cnt is negative");
	if (p->p_vforkcnt <= 0)
		p->p_flag &= ~P_VFORK;
	ut->uu_userstate = 0;
	ut->uu_flag &= ~P_VFORK;
	ut->uu_proc = 0;
	ut->uu_sigmask = ut->uu_vforkmask;
	p2->p_flag &= ~P_INVFORK;
	p2->p_vforkact = (void *)0;

	thread_set_parent(cur_act, p2->p_pid);

	if (retval) {
		retval[0] = p2->p_pid;
		retval[1] = 0;		/* mark parent */
	}

	return;
}
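
/*
 * thread_set_child() and thread_set_parent() are assumed to patch the
 * saved user register state of the given activation so that, on return
 * to user mode, it sees the return values appropriate to its role:
 * the child's pid with the parent marker, or 0 with the child marker.
 */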

thread_act_t
procdup(
	struct proc *child,
	struct proc *parent)
{
	thread_act_t	thread;
	task_t		task;
	kern_return_t	result;
	extern task_t	kernel_task;

	if (parent->task == kernel_task)
		result = task_create_local(TASK_NULL, FALSE, FALSE, &task);
	else
		result = task_create_local(parent->task, TRUE, FALSE, &task);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: task_create failed. Code: 0x%x\n", result);
	child->task = task;
	/* task->proc = child; */
	set_bsdtask_info(task, child);
	if (child->p_nice != 0)
		resetpriority(child);
	result = thread_create(task, &thread);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: thread_create failed. Code: 0x%x\n", result);

	return (thread);
}
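
/*
 * The boolean second argument to task_create_local() is the memory
 * inheritance flag: children of kernel_task start with a fresh address
 * map, while ordinary children inherit a copy-on-write image of the
 * parent's map, which is what gives fork() its memory semantics.
 */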

static int
fork1(p1, flags, retval)
	struct proc *p1;
	long flags;
	register_t *retval;
{
	register struct proc *p2;
	register uid_t uid;
	thread_act_t newth;
	int s, count;
	task_t t;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		tablefull("proc");
		retval[1] = 0;
		return (EAGAIN);
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		return (EAGAIN);
	}

	/* The newly created process comes with signal lock held */
	newth = cloneproc(p1, 1);
	thread_dup(newth);
	/* p2 = newth->task->proc; */
	p2 = (struct proc *)(get_bsdtask_info(get_threadtask(newth)));

	thread_set_child(newth, p2->p_pid);

	s = splhigh();
	p2->p_stats->p_start = time;
	splx(s);
	p2->p_acflag = AFORK;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	if (flags == DOVFORK)
		p2->p_flag |= P_PPWAIT;
	/* drop the signal lock on the child */
	signal_unlock(p2);

	(void) thread_resume(newth);

	/* drop the extra references we got during the creation */
	if ((t = (task_t)get_threadtask(newth)) != NULL) {
		task_deallocate(t);
	}
	act_deallocate(newth);

	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, PWAIT, "ppwait", 0);

	retval[0] = p2->p_pid;
	retval[1] = 0;		/* mark parent */

	return (0);
}
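
/*
 * The P_PPWAIT loop above is the classic vfork handshake: the parent
 * sleeps on its own proc pointer until the child's exec or exit path
 * (elsewhere in the kernel) clears P_PPWAIT and issues the matching
 * wakeup.  For plain fork() the flag is never set, so the loop is a
 * no-op and parent and child run concurrently.
 */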

/*
 * cloneproc()
 *
 * Create a new process from a specified process.
 * On return, the newly created child process holds the signal
 * lock, blocking delivery of signals to it, if called with
 * lock set.  The fork() code needs to explicitly remove this lock
 * before signals can be delivered.
 */
thread_act_t
cloneproc(p1, lock)
	register struct proc *p1;
	register int lock;
{
	register struct proc *p2;
	thread_act_t th;

	p2 = (struct proc *)forkproc(p1, lock);

	th = procdup(p2, p1);	/* child, parent */

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	p2->p_pptr = p1;
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	TAILQ_INIT(&p2->p_evlist);
	/*
	 * Make child runnable, set start time.
	 */
	p2->p_stat = SRUN;

	return (th);
}
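
/*
 * Division of labor: forkproc() allocates and fills in the BSD proc
 * structure, procdup() creates the backing Mach task and thread, and
 * cloneproc() links the result into allproc, the pid hash, and the
 * parent's list of children.
 */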

struct proc *
forkproc(p1, lock)
	register struct proc *p1;
	register int lock;
{
	register struct proc *p2, *newproc;
	static int nextpid = 0, pidchecked = 0;
	thread_t th;

	/* Allocate new proc. */
	MALLOC_ZONE(newproc, struct proc *,
			sizeof *newproc, M_PROC, M_WAITOK);
	MALLOC_ZONE(newproc->p_cred, struct pcred *,
			sizeof *newproc->p_cred, M_SUBPROC, M_WAITOK);
	MALLOC_ZONE(newproc->p_stats, struct pstats *,
			sizeof *newproc->p_stats, M_SUBPROC, M_WAITOK);
	MALLOC_ZONE(newproc->p_sigacts, struct sigacts *,
			sizeof *newproc->p_sigacts, M_SUBPROC, M_WAITOK);

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = allproc.lh_first;
again:
		for (; p2 != 0; p2 = p2->p_list.le_next) {
			while (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid ||
			    p2->p_session->s_sid == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp && p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > nextpid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = zombproc.lh_first;
			goto again;
		}
	}
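	/*
	 * The scan treats process group and session IDs as in use as
	 * well, so a freed pid cannot be handed out while it still
	 * names a live pgrp or session.  The [nextpid, pidchecked)
	 * window amortizes the cost: most calls take a pid from the
	 * cached range without walking the lists at all.
	 */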

	nprocs++;
	p2 = newproc;
	p2->p_stat = SIDL;
	p2->p_pid = nextpid;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
	p2->vm_shm = (void *)NULL;	/* Make sure it is zero */

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructures are filled in below.
	 */
470 if (p1->p_flag & P_PROFIL)
471 startprofclock(p2);
472 bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
473 p2->p_cred->p_refcnt = 1;
474 crhold(p1->p_ucred);
475 lockinit(&p2->p_cred->pc_lock, PLOCK, "proc cred", 0, 0);
476
477 /* bump references to the text vnode */
478 p2->p_textvp = p1->p_textvp;
479 if (p2->p_textvp)
480 VREF(p2->p_textvp);
481
482 p2->p_fd = fdcopy(p1);
483 if (p1->vm_shm) {
484 shmfork(p1,p2);
485 }
486 /*
487 * If p_limit is still copy-on-write, bump refcnt,
488 * otherwise get a copy that won't be modified.
489 * (If PL_SHAREMOD is clear, the structure is shared
490 * copy-on-write.)
491 */
492 if (p1->p_limit->p_lflags & PL_SHAREMOD)
493 p2->p_limit = limcopy(p1->p_limit);
494 else {
495 p2->p_limit = p1->p_limit;
496 p2->p_limit->p_refcnt++;
497 }
498
499 bzero(&p2->p_stats->pstat_startzero,
500 (unsigned) ((caddr_t)&p2->p_stats->pstat_endzero -
501 (caddr_t)&p2->p_stats->pstat_startzero));
502 bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
503 ((caddr_t)&p2->p_stats->pstat_endcopy -
504 (caddr_t)&p2->p_stats->pstat_startcopy));
505
506 if (p1->p_sigacts != NULL)
507 (void)memcpy(p2->p_sigacts,
508 p1->p_sigacts, sizeof *p2->p_sigacts);
509 else
510 (void)memset(p2->p_sigacts, 0, sizeof *p2->p_sigacts);
511
512 if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
513 p2->p_flag |= P_CONTROLT;
514
515 p2->p_xstat = 0;
516 p2->p_ru = NULL;
517
518 p2->p_debugger = 0; /* don't inherit */
519 lockinit(&p2->signal_lock, PVM, "signal", 0, 0);
520 /* block all signals to reach the process */
521 if (lock)
522 signal_lock(p2);
523 p2->sigwait = FALSE;
524 p2->sigwait_thread = NULL;
525 p2->exit_thread = NULL;
526 p2->user_stack = p1->user_stack;
527 p2->p_xxxsigpending = 0;
528 p2->p_vforkcnt = 0;
529 p2->p_vforkact = 0;
530 TAILQ_INIT(&p2->p_uthlist);
531
532 #if KTRACE
533 /*
534 * Copy traceflag and tracefile if enabled.
535 * If not inherited, these were zeroed above.
536 */
537 if (p1->p_traceflag&KTRFAC_INHERIT) {
538 p2->p_traceflag = p1->p_traceflag;
539 if ((p2->p_tracep = p1->p_tracep) != NULL)
540 VREF(p2->p_tracep);
541 }
542 #endif
543 return(p2);
544
545 }
546
#include <kern/zalloc.h>

struct zone *uthread_zone;
int uthread_zone_inited = 0;

void
uthread_zone_init()
{
	if (!uthread_zone_inited) {
		uthread_zone = zinit(sizeof(struct uthread),
		    THREAD_MAX * sizeof(struct uthread),
		    THREAD_CHUNK * sizeof(struct uthread),
		    "uthreads");
		uthread_zone_inited = 1;
	}
}
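
/*
 * zinit() takes the element size, the maximum memory the zone may
 * consume, the chunk size the zone grows by, and a name for debugging;
 * THREAD_MAX and THREAD_CHUNK come from <kern/mach_param.h>, sizing the
 * uthread zone to match the Mach thread limits.
 */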

void *
uthread_alloc(task_t task, thread_act_t thr_act)
{
	struct proc *p;
	struct uthread *uth, *uth_parent;
	void *ut;
	extern task_t kernel_task;
	boolean_t funnel_state;

	if (!uthread_zone_inited)
		uthread_zone_init();

	ut = (void *)zalloc(uthread_zone);
	bzero(ut, sizeof(struct uthread));

	if (task != kernel_task) {
		uth = (struct uthread *)ut;
		p = get_bsdtask_info(task);

		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		uth_parent = (struct uthread *)get_bsdthread_info(current_act());
		if (uth_parent) {
			if (uth_parent->uu_flag & USAS_OLDMASK)
				uth->uu_sigmask = uth_parent->uu_oldmask;
			else
				uth->uu_sigmask = uth_parent->uu_sigmask;
		}
		uth->uu_act = thr_act;
		//signal_lock(p);
		if (p)
			TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		//signal_unlock(p);
		(void)thread_funnel_set(kernel_flock, funnel_state);
	}

	return (ut);
}
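
/*
 * uthread_alloc() hangs the per-thread BSD state off a new Mach thread:
 * the thread inherits its signal mask from the creating thread (taking
 * the pre-sigsuspend mask when USAS_OLDMASK indicates one is saved) and
 * is queued on the proc's p_uthlist so per-process code can reach every
 * thread in the process.
 */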

void
uthread_free(task_t task, void *uthread, void *bsd_info)
{
	struct _select *sel;
	struct uthread *uth = (struct uthread *)uthread;
	struct proc *p = (struct proc *)bsd_info;
	extern task_t kernel_task;
	int size;
	boolean_t funnel_state;

	sel = &uth->uu_state.ss_select;
	/* clean up the select bit space */
	if (sel->nbytes) {
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
	}

	if (sel->allocsize && uth->uu_wqsub) {
		kfree(uth->uu_wqsub, sel->allocsize);
		sel->count = sel->nfcount = 0;
		sel->allocsize = 0;
		uth->uu_wqsub = 0;
		sel->wql = 0;
	}

	if ((task != kernel_task) && p) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		//signal_lock(p);
		TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
		//signal_unlock(p);
		(void)thread_funnel_set(kernel_flock, funnel_state);
	}
	/* and free the uthread itself */
	zfree(uthread_zone, (vm_offset_t)uthread);
}
637 }