X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0b4e3aa066abc0728aacb4bbeb86f53f9737156e..5eebf7385fedb1517b66b53c28e5aa6bb0a2be50:/bsd/kern/kern_fork.c?ds=sidebyside

diff --git a/bsd/kern/kern_fork.c b/bsd/kern/kern_fork.c
index 58aa6ecf4..f8b8cf3d8 100644
--- a/bsd/kern/kern_fork.c
+++ b/bsd/kern/kern_fork.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
@@ -60,6 +60,7 @@
  * @(#)kern_fork.c	8.8 (Berkeley) 2/14/95
  */
 
+#include
 #include
 #include
 #include
@@ -71,16 +72,23 @@
 #include
 #include
 #include
+#include
+
+#include
+
+#if KTRACE
 #include
+#include
+#endif
 
 #include
 #include
 #include
 
-thread_t cloneproc(struct proc *, int);
+thread_act_t cloneproc(struct proc *, int);
 struct proc * forkproc(struct proc *, int);
-thread_t procdup();
+thread_act_t procdup();
 
 #define DOFORK	0x1	/* fork() system call */
 #define DOVFORK	0x2	/* vfork() system call */
@@ -141,6 +149,7 @@ vfork(p, uap, retval)
 	ut = (struct uthread *)get_bsdthread_info(cur_act);
 	if (ut->uu_flag & P_VFORK) {
 		printf("vfork called recursively by %s\n", p->p_comm);
+		(void)chgproccnt(uid, -1);
 		return (EINVAL);
 	}
 	p->p_flag |= P_VFORK;
@@ -149,6 +158,8 @@ vfork(p, uap, retval)
 	/* The newly created process comes with signal lock held */
 	newproc = (struct proc *)forkproc(p,1);
 
+	AUDIT_ARG(pid, newproc->p_pid);
+
 	LIST_INSERT_AFTER(p, newproc, p_pglist);
 	newproc->p_pptr = p;
 	newproc->task = p->task;
@@ -164,6 +175,7 @@ vfork(p, uap, retval)
 	ut->uu_flag |= P_VFORK;
 	ut->uu_proc = newproc;
 	ut->uu_userstate = (void *)act_thread_csave();
+	ut->uu_vforkmask = ut->uu_sigmask;
 
 	thread_set_child(cur_act, newproc->p_pid);
@@ -198,29 +210,30 @@ vfork_return(th_act, p, p2, retval)
 {
 	long flags;
 	register uid_t uid;
-	thread_t newth, self = current_thread();
-	thread_act_t cur_act = (thread_act_t)current_act();
 	int s, count;
 	task_t t;
 	uthread_t ut;
 
-	ut = (struct uthread *)get_bsdthread_info(cur_act);
+	ut = (struct uthread *)get_bsdthread_info(th_act);
 	act_thread_catt(ut->uu_userstate);
 
 	/* Make sure only one at this time */
-	p->p_vforkcnt--;
-	if (p->p_vforkcnt <0)
-		panic("vfork cnt is -ve");
-	if (p->p_vforkcnt <=0)
-		p->p_flag &= ~P_VFORK;
+	if (p) {
+		p->p_vforkcnt--;
+		if (p->p_vforkcnt <0)
+			panic("vfork cnt is -ve");
+		if (p->p_vforkcnt <=0)
+			p->p_flag &= ~P_VFORK;
+	}
 	ut->uu_userstate = 0;
 	ut->uu_flag &= ~P_VFORK;
 	ut->uu_proc = 0;
+	ut->uu_sigmask = ut->uu_vforkmask;
 	p2->p_flag &= ~P_INVFORK;
 	p2->p_vforkact = (void *)0;
 
-	thread_set_parent(cur_act, p2->p_pid);
+	thread_set_parent(th_act, p2->p_pid);
 
 	if (retval) {
 		retval[0] = p2->p_pid;
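
For reference, the (void)chgproccnt(uid, -1) added in the recursive-vfork check above is needed because the caller's per-UID process count has already been raised earlier in vfork(), so the EINVAL path has to give that charge back. A minimal sketch of the charge/roll-back pattern, not part of the diff; the table, limit, and helper name are illustrative stand-ins for the kernel's real accounting:

#include <errno.h>

#define UID_TABLE_SIZE	1024		/* illustrative table size */
#define PROC_LIMIT	100		/* illustrative per-UID limit */

static int proc_count[UID_TABLE_SIZE];	/* stand-in for chgproccnt() state */

/* Charge a new process to 'uid' up front and undo the charge on every
 * early failure return, mirroring the fix in vfork() above. */
static int
reserve_proc_slot(unsigned int uid, int caller_already_in_vfork)
{
	proc_count[uid]++;			/* charged before any checks */
	if (caller_already_in_vfork) {
		proc_count[uid]--;		/* give the slot back, then fail */
		return EINVAL;
	}
	if (proc_count[uid] > PROC_LIMIT) {
		proc_count[uid]--;		/* same rule on the limit path */
		return EAGAIN;
	}
	return 0;				/* slot stays charged until exit */
}
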
@@ -230,20 +243,21 @@ vfork_return(th_act, p, p2, retval)
 	return;
 }
 
-thread_t
+thread_act_t
 procdup(
 	struct proc		*child,
 	struct proc		*parent)
 {
-	thread_t		thread;
+	thread_act_t		thread;
 	task_t			task;
 	kern_return_t	result;
+	pmap_t			pmap;
 	extern task_t	kernel_task;
 
 	if (parent->task == kernel_task)
-		result = task_create_local(TASK_NULL, FALSE, FALSE, &task);
+		result = task_create_internal(TASK_NULL, FALSE, &task);
 	else
-		result = task_create_local(parent->task, TRUE, FALSE, &task);
+		result = task_create_internal(parent->task, TRUE, &task);
 	if (result != KERN_SUCCESS)
 		printf("fork/procdup: task_create failed. Code: 0x%x\n", result);
 	child->task = task;
@@ -251,6 +265,7 @@ procdup(
 	set_bsdtask_info(task, child);
 	if (child->p_nice != 0)
 		resetpriority(child);
+
 	result = thread_create(task, &thread);
 	if (result != KERN_SUCCESS)
 		printf("fork/procdup: thread_create failed. Code: 0x%x\n", result);
@@ -267,7 +282,7 @@ fork1(p1, flags, retval)
 {
 	register struct proc *p2;
 	register uid_t uid;
-	thread_t newth, self = current_thread();
+	thread_act_t newth;
 	int s, count;
 	task_t t;
@@ -297,10 +312,12 @@ fork1(p1, flags, retval)
 	/* The newly created process comes with signal lock held */
 	newth = cloneproc(p1, 1);
 
-	thread_dup(current_act(), newth);
+	thread_dup(newth);
 	/* p2 = newth->task->proc; */
 	p2 = (struct proc *)(get_bsdtask_info(get_threadtask(newth)));
 
+	AUDIT_ARG(pid, p2->p_pid);
+
 	thread_set_child(newth, p2->p_pid);
 
 	s = splhigh();
@@ -326,6 +343,8 @@ fork1(p1, flags, retval)
 	}
 	act_deallocate(newth);
 
+	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
+
 	while (p2->p_flag & P_PPWAIT)
 		tsleep(p1, PWAIT, "ppwait", 0);
@@ -344,15 +363,17 @@ fork1(p1, flags, retval)
  * lock set. fork() code needs to explicity remove this lock
  * before signals can be delivered
  */
-thread_t
+thread_act_t
 cloneproc(p1, lock)
 	register struct proc *p1;
 	register int lock;
 {
 	register struct proc *p2;
-	thread_t th;
+	thread_act_t th;
 
 	p2 = (struct proc *)forkproc(p1,lock);
+
+
 	th = procdup(p2, p1);	/* child, parent */
 
 	LIST_INSERT_AFTER(p1, p2, p_pglist);
@@ -417,7 +438,8 @@ retry:
 again:
 	for (; p2 != 0; p2 = p2->p_list.le_next) {
 		while (p2->p_pid == nextpid ||
-		    p2->p_pgrp->pg_id == nextpid) {
+		    p2->p_pgrp->pg_id == nextpid ||
+		    p2->p_session->s_sid == nextpid) {
 			nextpid++;
 			if (nextpid >= pidchecked)
 				goto retry;
@@ -427,6 +449,9 @@ again:
 		if (p2->p_pgrp && p2->p_pgrp->pg_id > nextpid &&
 		    pidchecked > p2->p_pgrp->pg_id)
 			pidchecked = p2->p_pgrp->pg_id;
+		if (p2->p_session->s_sid > nextpid &&
+		    pidchecked > p2->p_session->s_sid)
+			pidchecked = p2->p_session->s_sid;
 	}
 	if (!doingzomb) {
 		doingzomb = 1;
@@ -440,6 +465,7 @@ again:
 
 	p2->p_stat = SIDL;
 	p2->p_pid = nextpid;
+	p2->p_shutdownstate = 0;
 
 	/*
 	 * Make a proc table entry for the new process.
 	 * Start by zeroing the section of proc that is zero-initialized,
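
For reference, the nextpid hunks above tighten the PID allocator's uniqueness rule: a candidate PID is now rejected if it collides with any live session ID (p_session->s_sid), in addition to any PID or process-group ID, and pidchecked is lowered so the scan is redone before such an ID could be handed out. A self-contained sketch of that rule, not part of the diff; the struct and the linear scan are illustrative (the kernel walks allproc and zombproc instead):

#include <stdbool.h>
#include <sys/types.h>

/* Illustrative stand-in for the identifiers a process can own. */
struct pid_owner {
	pid_t pid;	/* process ID */
	pid_t pgid;	/* process-group ID */
	pid_t sid;	/* session ID -- the newly checked field */
};

/* A candidate PID is usable only if no existing process owns it as a
 * process ID, a process-group ID, or a session ID. */
static bool
pid_is_free(pid_t candidate, const struct pid_owner *tab, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].pid == candidate ||
		    tab[i].pgid == candidate ||
		    tab[i].sid == candidate)
			return false;
	}
	return true;
}
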
@@ -451,20 +477,28 @@ again:
 	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
 	p2->vm_shm = (void *)NULL; /* Make sure it is zero */
 
+	/*
+	 * Copy the audit info.
+	 */
+	audit_proc_fork(p1, p2);
+
 	/*
 	 * Duplicate sub-structures as needed.
 	 * Increase reference counts on shared objects.
 	 * The p_stats and p_sigacts substructs are set in vm_fork.
 	 */
 	p2->p_flag = P_INMEM;
+	p2->p_flag |= (p1->p_flag & P_CLASSIC); // copy from parent
+	p2->p_flag |= (p1->p_flag & P_AFFINITY); // copy from parent
 	if (p1->p_flag & P_PROFIL)
 		startprofclock(p2);
 	bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
 	p2->p_cred->p_refcnt = 1;
 	crhold(p1->p_ucred);
 	lockinit(&p2->p_cred->pc_lock, PLOCK, "proc cred", 0, 0);
+	klist_init(&p2->p_klist);
 
-	/* bump references to the text vnode (for procfs) */
+	/* bump references to the text vnode */
 	p2->p_textvp = p1->p_textvp;
 	if (p2->p_textvp)
 		VREF(p2->p_textvp);
@@ -502,6 +536,8 @@ again:
 	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
 		p2->p_flag |= P_CONTROLT;
 
+	p2->p_argslen = p1->p_argslen;
+	p2->p_argc = p1->p_argc;
 	p2->p_xstat = 0;
 	p2->p_ru = NULL;
@@ -514,9 +550,13 @@ again:
 	p2->sigwait_thread = NULL;
 	p2->exit_thread = NULL;
 	p2->user_stack = p1->user_stack;
-	p2->p_sigpending = 0;
 	p2->p_vforkcnt = 0;
 	p2->p_vforkact = 0;
+	TAILQ_INIT(&p2->p_uthlist);
+	TAILQ_INIT(&p2->aio_activeq);
+	TAILQ_INIT(&p2->aio_doneq);
+	p2->aio_active_count = 0;
+	p2->aio_done_count = 0;
 
 #if KTRACE
 	/*
@@ -525,8 +565,11 @@ again:
 	 */
 	if (p1->p_traceflag&KTRFAC_INHERIT) {
 		p2->p_traceflag = p1->p_traceflag;
-		if ((p2->p_tracep = p1->p_tracep) != NULL)
+		if ((p2->p_tracep = p1->p_tracep) != NULL) {
+			if (UBCINFOEXISTS(p2->p_tracep))
+				ubc_hold(p2->p_tracep);
 			VREF(p2->p_tracep);
+		}
 	}
 #endif
 	return(p2);
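
For reference, klist_init(&p2->p_klist) above and the KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid) call added in fork1() are the kernel side of kqueue's EVFILT_PROC filter. A small user-space observer, not part of the diff, using only the standard kqueue API (the target PID is taken from argv purely for illustration):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	pid_t target = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) < 0)
		err(1, "kqueue");

	/* Ask to be told when 'target' forks or exits. */
	EV_SET(&kev, target, EVFILT_PROC, EV_ADD | EV_ENABLE,
	    NOTE_FORK | NOTE_EXIT, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
		err(1, "kevent register");

	/* Block until one of the requested events fires. */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1) {
		if (kev.fflags & NOTE_FORK)
			printf("pid %ld forked\n", (long)kev.ident);
		if (kev.fflags & NOTE_EXIT)
			printf("pid %ld exited\n", (long)kev.ident);
	}
	return 0;
}
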
@@ -551,25 +594,63 @@ uthread_zone_init()
 }
 
 void *
-uthread_alloc(void)
+uthread_alloc(task_t task, thread_act_t thr_act )
 {
+	struct proc *p;
+	struct uthread *uth, *uth_parent;
 	void *ut;
+	extern task_t kernel_task;
+	boolean_t funnel_state;
 
 	if (!uthread_zone_inited)
 		uthread_zone_init();
 
 	ut = (void *)zalloc(uthread_zone);
 	bzero(ut, sizeof(struct uthread));
+
+	if (task != kernel_task) {
+		uth = (struct uthread *)ut;
+		p = (struct proc *) get_bsdtask_info(task);
+
+		funnel_state = thread_funnel_set(kernel_flock, TRUE);
+		uth_parent = (struct uthread *)get_bsdthread_info(current_act());
+		if (uth_parent) {
+			if (uth_parent->uu_flag & USAS_OLDMASK)
+				uth->uu_sigmask = uth_parent->uu_oldmask;
+			else
+				uth->uu_sigmask = uth_parent->uu_sigmask;
+		}
+		uth->uu_act = thr_act;
+		//signal_lock(p);
+		if (p)
+			TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
+		//signal_unlock(p);
+		(void)thread_funnel_set(kernel_flock, funnel_state);
+	}
+
 	return (ut);
 }
 
 void
-uthread_free(void *uthread)
+uthread_free(task_t task, thread_t act, void *uthread, void * bsd_info)
 {
 	struct _select *sel;
 	struct uthread *uth = (struct uthread *)uthread;
+	struct proc * p = (struct proc *)bsd_info;
+	extern task_t kernel_task;
 	int size;
+	boolean_t funnel_state;
+	struct nlminfo *nlmp;
+	struct proc * vproc;
+
+	/*
+	 * Per-thread audit state should never last beyond system
+	 * call return.  Since we don't audit the thread creation/
+	 * removal, the thread state pointer should never be
+	 * non-NULL when we get here.
+	 */
+	assert(uth->uu_ar == NULL);
 
 	sel = &uth->uu_state.ss_select;
 	/* cleanup the select bit space */
@@ -586,6 +667,25 @@ uthread_free(void *uthread)
 		sel->wql = 0;
 	}
 
+	if ((nlmp = uth->uu_nlminfo)) {
+		uth->uu_nlminfo = 0;
+		FREE(nlmp, M_LOCKF);
+	}
+
+	if ((task != kernel_task) ) {
+		int vfork_exit(struct proc *, int);
+
+		funnel_state = thread_funnel_set(kernel_flock, TRUE);
+		if (p)
+			TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
+		if ((uth->uu_flag & P_VFORK) && (vproc = uth->uu_proc)
+				&& (vproc->p_flag & P_INVFORK)) {
+			if (!vfork_exit(vproc, W_EXITCODE(0, SIGKILL)))
+				vfork_return(act, p, vproc, NULL);
+
+		}
+		(void)thread_funnel_set(kernel_flock, funnel_state);
+	}
 	/* and free the uthread itself */
 	zfree(uthread_zone, (vm_offset_t)uthread);
 }
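
For reference, the uthread_alloc() hunk above makes a new thread start with its creator's signal mask, except that a creator currently running with a temporarily swapped mask (USAS_OLDMASK set, as in sigsuspend()-style calls) donates its saved original mask instead. A compact sketch of just that inheritance rule, not part of the diff; struct lw_uthread and the boolean flag are illustrative stand-ins for struct uthread and the USAS_OLDMASK flag:

#include <signal.h>
#include <stdbool.h>

struct lw_uthread {
	sigset_t sigmask;	/* currently installed mask */
	sigset_t oldmask;	/* original mask saved while a temporary one is active */
	bool	 oldmask_saved;	/* stand-in for the USAS_OLDMASK flag */
};

/* New threads inherit the creator's mask; if the creator has a saved
 * original mask, inherit that one rather than the temporary mask. */
static void
inherit_sigmask(struct lw_uthread *child, const struct lw_uthread *creator)
{
	if (creator == NULL) {
		sigemptyset(&child->sigmask);
		return;
	}
	child->sigmask = creator->oldmask_saved ? creator->oldmask
						: creator->sigmask;
}
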