bsd/kern/kern_fork.c (xnu-792.6.56)
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.8 (Berkeley) 2/14/95
 */

#include <kern/assert.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/resourcevar.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/acct.h>
#if KTRACE
#include <sys/ktrace.h>
#endif

#include <bsm/audit_kernel.h>

#include <mach/mach_types.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/zalloc.h>

#include <machine/spl.h>

#include <vm/vm_protos.h>	// for vm_map_commpage64

thread_t cloneproc(struct proc *, int);
struct proc *forkproc(struct proc *, int);
thread_t procdup(struct proc *child, struct proc *parent);

#define	DOFORK	0x1	/* fork() system call */
#define	DOVFORK	0x2	/* vfork() system call */
static int fork1(struct proc *, long, register_t *);

/*
 * fork system call.
 */
int
fork(struct proc *p, __unused void *uap, register_t *retval)
{
	return (fork1(p, (long)DOFORK, retval));
}
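
/*
 * Illustrative sketch (not part of xnu): how the two-register return
 * convention above surfaces in userland.  fork1() hands back the
 * child's pid in retval[0] and marks the child side via retval[1]
 * (set through thread_set_child); the libc stub turns that into the
 * familiar "0 in the child, child's pid in the parent".  A minimal
 * caller, assuming only standard POSIX headers:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid == 0)
 *			printf("child\n");	    (retval[1] marked us)
 *		else if (pid > 0)
 *			printf("parent of %d\n", (int)pid);
 *		return 0;
 *	}
 */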

/*
 * vfork system call
 */
int
vfork(struct proc *p, void *uap, register_t *retval)
{
	register struct proc *newproc;
	register uid_t uid;
	thread_t cur_act = (thread_t)current_thread();
	int count;
	task_t t;
	uthread_t ut;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = kauth_cred_get()->cr_ruid;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		tablefull("proc");
		retval[1] = 0;
		return (EAGAIN);
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		return (EAGAIN);
	}

	ut = (struct uthread *)get_bsdthread_info(cur_act);
	if (ut->uu_flag & UT_VFORK) {
		printf("vfork called recursively by %s\n", p->p_comm);
		(void)chgproccnt(uid, -1);
		return (EINVAL);
	}
	p->p_flag |= P_VFORK;
	p->p_vforkcnt++;

	/* The newly created process comes with signal lock held */
	newproc = (struct proc *)forkproc(p, 1);

	AUDIT_ARG(pid, newproc->p_pid);

	LIST_INSERT_AFTER(p, newproc, p_pglist);
	newproc->p_pptr = p;
	newproc->task = p->task;
	LIST_INSERT_HEAD(&p->p_children, newproc, p_sibling);
	LIST_INIT(&newproc->p_children);
	LIST_INSERT_HEAD(&allproc, newproc, p_list);
	LIST_INSERT_HEAD(PIDHASH(newproc->p_pid), newproc, p_hash);
	TAILQ_INIT(&newproc->p_evlist);
	newproc->p_stat = SRUN;
	newproc->p_flag |= P_INVFORK;
	newproc->p_vforkact = cur_act;

	ut->uu_flag |= UT_VFORK;
	ut->uu_proc = newproc;
	ut->uu_userstate = (void *)act_thread_csave();
	ut->uu_vforkmask = ut->uu_sigmask;

	/* temporarily drop thread-set-id state */
	if (ut->uu_flag & UT_SETUID) {
		ut->uu_flag |= UT_WASSETUID;
		ut->uu_flag &= ~UT_SETUID;
	}

	thread_set_child(cur_act, newproc->p_pid);

	microtime(&newproc->p_stats->p_start);
	newproc->p_acflag = AFORK;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	newproc->p_flag |= P_PPWAIT;

	/* drop the signal lock on the child */
	signal_unlock(newproc);

	retval[0] = newproc->p_pid;
	retval[1] = 1;		/* mark child */

	return (0);
}
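
/*
 * Illustrative sketch (not part of xnu): note that vfork() above
 * creates no new task or thread -- the child borrows the parent's
 * thread (cur_act) and address space until it execs or exits, which
 * is why POSIX permits the child to do little more than that.  A
 * minimal, conforming caller:
 *
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		pid_t pid = vfork();
 *
 *		if (pid == 0) {
 *			execl("/bin/echo", "echo", "hi", (char *)0);
 *			_exit(127);	    (exec failed; must not return)
 *		}
 *		return 0;	    (parent resumes after exec/_exit)
 *	}
 */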

/*
 * vfork_return: restore the parent thread's state and return values
 * once the vfork child has exec'd or exited.
 */
void
vfork_return(__unused thread_t th_act, struct proc *p, struct proc *p2,
	register_t *retval)
{
	thread_t cur_act = (thread_t)current_thread();
	uthread_t ut;

	ut = (struct uthread *)get_bsdthread_info(cur_act);

	act_thread_catt(ut->uu_userstate);

	/* drop one vfork reference on the parent; clear P_VFORK with the last */
	p->p_vforkcnt--;
	if (p->p_vforkcnt < 0)
		panic("vfork: negative p_vforkcnt");
	if (p->p_vforkcnt <= 0)
		p->p_flag &= ~P_VFORK;
	ut->uu_userstate = 0;
	ut->uu_flag &= ~UT_VFORK;
	/* restore thread-set-id state */
	if (ut->uu_flag & UT_WASSETUID) {
		ut->uu_flag |= UT_SETUID;
		ut->uu_flag &= ~UT_WASSETUID;
	}
	ut->uu_proc = 0;
	ut->uu_sigmask = ut->uu_vforkmask;
	p2->p_flag &= ~P_INVFORK;
	p2->p_vforkact = (void *)0;

	thread_set_parent(cur_act, p2->p_pid);

	if (retval) {
		retval[0] = p2->p_pid;
		retval[1] = 0;		/* mark parent */
	}

	return;
}

thread_t
procdup(struct proc *child, struct proc *parent)
{
	thread_t thread;
	task_t task;
	kern_return_t result;

	/*
	 * XXX failures below are only logged; the caller proceeds with
	 * whatever task/thread values came back.
	 */
	if (parent->task == kernel_task)
		result = task_create_internal(TASK_NULL, FALSE, &task);
	else
		result = task_create_internal(parent->task, TRUE, &task);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: task_create failed. Code: 0x%x\n", result);
	child->task = task;
	/* task->proc = child; */
	set_bsdtask_info(task, child);
	if (parent->p_flag & P_LP64) {
		task_set_64bit(task, TRUE);
		child->p_flag |= P_LP64;
#ifdef __PPC__
		/* LP64todo - clean up this hacked mapping of commpage */
		pmap_map_sharedpage(task, get_map_pmap(get_task_map(task)));
		vm_map_commpage64(get_task_map(task));
#endif /* __PPC__ */
	} else {
		task_set_64bit(task, FALSE);
		child->p_flag &= ~P_LP64;
	}
	if (child->p_nice != 0)
		resetpriority(child);

	result = thread_create(task, &thread);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: thread_create failed. Code: 0x%x\n", result);

	return (thread);
}

static int
fork1(struct proc *p1, long flags, register_t *retval)
{
	register struct proc *p2;
	register uid_t uid;
	thread_t newth;
	int count;
	task_t t;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = kauth_cred_get()->cr_ruid;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		tablefull("proc");
		retval[1] = 0;
		return (EAGAIN);
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		return (EAGAIN);
	}

	/* The newly created process comes with signal lock held */
	newth = cloneproc(p1, 1);
	thread_dup(newth);
	/* p2 = newth->task->proc; */
	p2 = (struct proc *)(get_bsdtask_info(get_threadtask(newth)));
	set_security_token(p2);		/* propagate change of PID */

	AUDIT_ARG(pid, p2->p_pid);

	thread_set_child(newth, p2->p_pid);

	microtime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	if (flags == DOVFORK)
		p2->p_flag |= P_PPWAIT;
	/* drop the signal lock on the child */
	signal_unlock(p2);

	(void)thread_resume(newth);

	/* drop the extra references we got during the creation */
	if ((t = (task_t)get_threadtask(newth)) != NULL) {
		task_deallocate(t);
	}
	thread_deallocate(newth);

	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, PWAIT, "ppwait", 0);

	retval[0] = p2->p_pid;
	retval[1] = 0;		/* mark parent */

	return (0);
}
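
/*
 * Illustrative note (not part of this function): the P_PPWAIT
 * tsleep() above is paired with a wakeup issued when the vfork'd
 * child execs or exits; in this source base that side lives in
 * kern_exec.c and kern_exit.c.  Schematically (a sketch, not a
 * quote of those files):
 *
 *	if (p->p_flag & P_PPWAIT) {
 *		p->p_flag &= ~P_PPWAIT;
 *		wakeup((caddr_t)p->p_pptr);	releases tsleep(p1, ...)
 *	}
 */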

/*
 * cloneproc()
 *
 * Create a new process from a specified process.
 * On return, the newly created child process holds the signal
 * lock (blocking signal delivery to it) if called with lock set.
 * The fork() code must explicitly drop this lock before signals
 * can be delivered.
 */
thread_t
cloneproc(struct proc *p1, int lock)
{
	register struct proc *p2;
	thread_t th;

	p2 = (struct proc *)forkproc(p1, lock);

	th = procdup(p2, p1);	/* child, parent */

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	p2->p_pptr = p1;
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	TAILQ_INIT(&p2->p_evlist);
	/*
	 * Make child runnable, set start time.
	 */
	p2->p_stat = SRUN;

	return (th);
}

struct proc *
forkproc(struct proc *p1, int lock)
{
	register struct proc *p2, *newproc;
	static int nextpid = 0, pidchecked = 0;

	/* Allocate new proc. */
	MALLOC_ZONE(newproc, struct proc *,
	    sizeof *newproc, M_PROC, M_WAITOK);
	if (newproc == NULL)
		panic("forkproc: M_PROC zone exhausted");
	MALLOC_ZONE(newproc->p_stats, struct pstats *,
	    sizeof *newproc->p_stats, M_SUBPROC, M_WAITOK);
	if (newproc->p_stats == NULL)
		panic("forkproc: M_SUBPROC zone exhausted (p_stats)");
	MALLOC_ZONE(newproc->p_sigacts, struct sigacts *,
	    sizeof *newproc->p_sigacts, M_SUBPROC, M_WAITOK);
	if (newproc->p_sigacts == NULL)
		panic("forkproc: M_SUBPROC zone exhausted (p_sigacts)");

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = allproc.lh_first;
again:
		for (; p2 != 0; p2 = p2->p_list.le_next) {
			while (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid ||
			    p2->p_session->s_sid == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp && p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > nextpid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = zombproc.lh_first;
			goto again;
		}
	}
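
	/*
	 * Illustrative sketch (not part of xnu): the scan above amortizes
	 * pid allocation.  A single pass over allproc/zombproc both
	 * advances nextpid past every pid/pgid/sid collision and records
	 * in pidchecked the smallest in-use id above nextpid, so ids in
	 * (nextpid, pidchecked) can be handed out on later forks without
	 * rescanning.  Distilled over a plain array of in-use ids
	 * (PID_MAX as above; pick_pid is a hypothetical name):
	 *
	 *	static int
	 *	pick_pid(const int *inuse, int n, int *nextpid, int *pidchecked)
	 *	{
	 *		int i;
	 *	retry:
	 *		if (*nextpid >= PID_MAX) {
	 *			*nextpid = 100;
	 *			*pidchecked = 0;
	 *		}
	 *		if (*nextpid >= *pidchecked) {
	 *			*pidchecked = PID_MAX;
	 *			for (i = 0; i < n; i++) {
	 *				while (inuse[i] == *nextpid) {
	 *					(*nextpid)++;
	 *					if (*nextpid >= *pidchecked)
	 *						goto retry;
	 *				}
	 *				if (inuse[i] > *nextpid &&
	 *				    *pidchecked > inuse[i])
	 *					*pidchecked = inuse[i];
	 *			}
	 *		}
	 *		return *nextpid;
	 *	}
	 */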

	nprocs++;
	p2 = newproc;
	p2->p_stat = SIDL;
	p2->p_shutdownstate = 0;
	p2->p_pid = nextpid;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned)((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned)((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
	p2->vm_shm = (void *)NULL;	/* Make sure it is zero */
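
	/*
	 * Illustrative sketch (not part of xnu): p_startzero/p_endzero and
	 * p_startcopy/p_endcopy are marker names laid down in struct proc
	 * so whole runs of fields can be zeroed or copied in one call.
	 * The same pattern on a hypothetical struct widget:
	 *
	 *	struct widget {
	 *		int	w_refcnt;	preserved
	 *		int	w_pending;	-- zeroed section starts here
	 *		int	w_errors;
	 *		int	w_flags;	-- copied section starts here
	 *		int	w_prio;
	 *		int	w_private;	preserved; doubles as end marker
	 *	};
	 *	#define	w_startzero	w_pending
	 *	#define	w_endzero	w_flags	    (first field past the zeroed run)
	 *	#define	w_startcopy	w_flags
	 *	#define	w_endcopy	w_private   (first field past the copied run)
	 *
	 *	bzero(&w2->w_startzero,
	 *	    (size_t)((caddr_t)&w2->w_endzero - (caddr_t)&w2->w_startzero));
	 *	bcopy(&w1->w_startcopy, &w2->w_startcopy,
	 *	    (size_t)((caddr_t)&w2->w_endcopy - (caddr_t)&w2->w_startcopy));
	 */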

	/*
	 * Some flags are inherited from the parent.
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = (p1->p_flag & (P_LP64 | P_CLASSIC | P_AFFINITY));
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	/*
	 * Note that if the current thread has an assumed identity, this
	 * credential will be granted to the new process.
	 */
	p2->p_ucred = kauth_cred_get_with_ref();

	lck_mtx_init(&p2->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&p2->p_fdmlock, proc_lck_grp, proc_lck_attr);
	klist_init(&p2->p_klist);

	/* bump references to the text vnode */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp) {
		vnode_ref(p2->p_textvp);
	}
	/* XXX may fail to copy descriptors to child */
	p2->p_fd = fdcopy(p1);

	if (p1->vm_shm) {
		/* XXX may fail to attach shm to child */
		(void)shmfork(p1, p2);
	}
	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	bzero(&p2->p_stats->pstat_startzero,
	    (unsigned)((caddr_t)&p2->p_stats->pstat_endzero -
	    (caddr_t)&p2->p_stats->pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
	    ((caddr_t)&p2->p_stats->pstat_endcopy -
	    (caddr_t)&p2->p_stats->pstat_startcopy));

	bzero(&p2->p_stats->user_p_prof, sizeof(struct user_uprof));

	if (p1->p_sigacts != NULL)
		(void)memcpy(p2->p_sigacts,
		    p1->p_sigacts, sizeof *p2->p_sigacts);
	else
		(void)memset(p2->p_sigacts, 0, sizeof *p2->p_sigacts);

	if (p1->p_session->s_ttyvp != NULL && (p1->p_flag & P_CONTROLT))
		p2->p_flag |= P_CONTROLT;

	p2->p_argslen = p1->p_argslen;
	p2->p_argc = p1->p_argc;
	p2->p_xstat = 0;
	p2->p_ru = NULL;

	p2->p_debugger = 0;	/* don't inherit */
	lockinit(&p2->signal_lock, PVM, "signal", 0, 0);
	/* block all signals to reach the process */
	if (lock)
		signal_lock(p2);
	p2->sigwait = FALSE;
	p2->sigwait_thread = NULL;
	p2->exit_thread = NULL;
	p2->user_stack = p1->user_stack;
	p2->p_vforkcnt = 0;
	p2->p_vforkact = 0;
	p2->p_lflag = 0;
	p2->p_ladvflag = 0;
	p2->p_internalref = 0;
	TAILQ_INIT(&p2->p_uthlist);
	TAILQ_INIT(&p2->aio_activeq);
	TAILQ_INIT(&p2->aio_doneq);
	p2->aio_active_count = 0;
	p2->aio_done_count = 0;

#if KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL) {
			vnode_ref(p2->p_tracep);
		}
	}
#endif
	return (p2);
}

void
proc_lock(proc_t p)
{
	lck_mtx_lock(&p->p_mlock);
}

void
proc_unlock(proc_t p)
{
	lck_mtx_unlock(&p->p_mlock);
}
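
/*
 * Illustrative usage (not part of xnu): p_mlock protects mutable
 * per-proc state, so callers bracket short critical sections:
 *
 *	proc_lock(p);
 *	... read or update fields guarded by p_mlock ...
 *	proc_unlock(p);
 */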

struct zone *uthread_zone;
int uthread_zone_inited = 0;

void
uthread_zone_init(void)
{
	if (!uthread_zone_inited) {
		uthread_zone = zinit(sizeof(struct uthread),
		    THREAD_MAX * sizeof(struct uthread),
		    THREAD_CHUNK * sizeof(struct uthread),
		    "uthreads");
		uthread_zone_inited = 1;
	}
}
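
/*
 * Illustrative note (not part of xnu): zinit(size, max, alloc, name)
 * creates a zone of fixed-size objects (here capped at THREAD_MAX
 * uthreads, grown THREAD_CHUNK at a time); uthread_alloc() and
 * uthread_free() below pair zalloc()/zfree() against it:
 *
 *	void *ut = zalloc(uthread_zone);	get one uthread
 *	...
 *	zfree(uthread_zone, ut);		give it back
 */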

void *
uthread_alloc(task_t task, thread_t thr_act)
{
	struct proc *p;
	struct uthread *uth, *uth_parent;
	void *ut;
	boolean_t funnel_state;

	if (!uthread_zone_inited)
		uthread_zone_init();

	ut = (void *)zalloc(uthread_zone);
	bzero(ut, sizeof(struct uthread));

	p = (struct proc *)get_bsdtask_info(task);
	uth = (struct uthread *)ut;

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL.  If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	uth_parent = (struct uthread *)get_bsdthread_info(current_thread());
	if ((task == current_task()) &&
	    (uth_parent != NULL) &&
	    (uth_parent->uu_ucred != NOCRED)) {
		uth->uu_ucred = uth_parent->uu_ucred;
		kauth_cred_ref(uth->uu_ucred);
		/* the credential we just inherited is an assumed credential */
		if (uth_parent->uu_flag & UT_SETUID)
			uth->uu_flag |= UT_SETUID;
	} else {
		uth->uu_ucred = NOCRED;
	}

	if (task != kernel_task) {

		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		if (uth_parent) {
			if (uth_parent->uu_flag & UT_SAS_OLDMASK)
				uth->uu_sigmask = uth_parent->uu_oldmask;
			else
				uth->uu_sigmask = uth_parent->uu_sigmask;
		}
		uth->uu_act = thr_act;
		//signal_lock(p);
		if (p) {
			TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		}
		//signal_unlock(p);
		(void)thread_funnel_set(kernel_flock, funnel_state);
	}

	return (ut);
}

void
uthread_free(task_t task, void *uthread, void *bsd_info)
{
	struct _select *sel;
	struct uthread *uth = (struct uthread *)uthread;
	struct proc *p = (struct proc *)bsd_info;
	boolean_t funnel_state;

	/*
	 * Per-thread audit state should never last beyond system
	 * call return.  Since we don't audit the thread creation/
	 * removal, the thread state pointer should never be
	 * non-NULL when we get here.
	 */
	assert(uth->uu_ar == NULL);

	sel = &uth->uu_select;
	/* cleanup the select bit space */
	if (sel->nbytes) {
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
	}

	if (sel->allocsize && sel->wqset) {
		kfree(sel->wqset, sel->allocsize);
		sel->count = 0;
		sel->allocsize = 0;
		sel->wqset = 0;
		sel->wql = 0;
	}

	if (uth->uu_ucred != NOCRED)
		kauth_cred_rele(uth->uu_ucred);

	if ((task != kernel_task) && p) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
		//signal_lock(p);
		TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
		//signal_unlock(p);
		(void)thread_funnel_set(kernel_flock, funnel_state);
	}
	/* and free the uthread itself */
	zfree(uthread_zone, uthread);
}