/*
 * bsd/kern/kern_fork.c — from the Apple XNU kernel (release xnu-3789.70.16).
 */
1 /*
2 * Copyright (c) 2000-2007, 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74 /*
75 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
76 * support for mandatory and extensible security protections. This notice
77 * is included in support of clause 2.2 (b) of the Apple Public License,
78 * Version 2.0.
79 */
80
81 #include <kern/assert.h>
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/filedesc.h>
85 #include <sys/kernel.h>
86 #include <sys/malloc.h>
87 #include <sys/proc_internal.h>
88 #include <sys/kauth.h>
89 #include <sys/user.h>
90 #include <sys/reason.h>
91 #include <sys/resourcevar.h>
92 #include <sys/vnode_internal.h>
93 #include <sys/file_internal.h>
94 #include <sys/acct.h>
95 #include <sys/codesign.h>
96 #include <sys/sysproto.h>
97 #if CONFIG_PERSONAS
98 #include <sys/persona.h>
99 #endif
100 #include <sys/doc_tombstone.h>
101 #if CONFIG_DTRACE
102 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
103 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
104 extern void dtrace_proc_fork(proc_t, proc_t, int);
105
106 /*
107 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
108 * we will store its value before actually calling it.
109 */
110 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
111
112 #include <sys/dtrace_ptss.h>
113 #endif
114
115 #include <security/audit/audit.h>
116
117 #include <mach/mach_types.h>
118 #include <kern/coalition.h>
119 #include <kern/kern_types.h>
120 #include <kern/kalloc.h>
121 #include <kern/mach_param.h>
122 #include <kern/task.h>
123 #include <kern/thread.h>
124 #include <kern/thread_call.h>
125 #include <kern/zalloc.h>
126
127 #include <machine/spl.h>
128
129 #if CONFIG_MACF
130 #include <security/mac.h>
131 #include <security/mac_mach_internal.h>
132 #endif
133
134 #include <vm/vm_map.h>
135 #include <vm/vm_protos.h>
136 #include <vm/vm_shared_region.h>
137
138 #include <sys/shm_internal.h> /* for shmfork() */
139 #include <mach/task.h> /* for thread_create() */
140 #include <mach/thread_act.h> /* for thread_resume() */
141
142 #include <sys/sdt.h>
143
144 #if CONFIG_MEMORYSTATUS
145 #include <sys/kern_memorystatus.h>
146 #endif
147
148 /* XXX routines which should have Mach prototypes, but don't */
149 void thread_set_parent(thread_t parent, int pid);
150 extern void act_thread_catt(void *ctx);
151 void thread_set_child(thread_t child, int pid);
152 void *act_thread_csave(void);
153 extern boolean_t task_is_exec_copy(task_t);
154
155
156 thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
157 proc_t forkproc(proc_t);
158 void forkproc_free(proc_t);
159 thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child, int inherit_memory, int is64bit, int in_exec);
160 void proc_vfork_begin(proc_t parent_proc);
161 void proc_vfork_end(proc_t parent_proc);
162
163 #define DOFORK 0x1 /* fork() system call */
164 #define DOVFORK 0x2 /* vfork() system call */
165
166 /*
167 * proc_vfork_begin
168 *
169 * Description: start a vfork on a process
170 *
171 * Parameters: parent_proc process (re)entering vfork state
172 *
173 * Returns: (void)
174 *
175 * Notes: Although this function increments a count, a count in
176 * excess of 1 is not currently supported. According to the
177 * POSIX standard, calling anything other than execve() or
178 * _exit() following a vfork(), including calling vfork()
179 * itself again, will result in undefined behaviour
180 */
181 void
182 proc_vfork_begin(proc_t parent_proc)
183 {
184 proc_lock(parent_proc);
185 parent_proc->p_lflag |= P_LVFORK;
186 parent_proc->p_vforkcnt++;
187 proc_unlock(parent_proc);
188 }
189
190 /*
191 * proc_vfork_end
192 *
193 * Description: stop a vfork on a process
194 *
195 * Parameters: parent_proc process leaving vfork state
196 *
197 * Returns: (void)
198 *
199 * Notes: Decrements the count; currently, reentrancy of vfork()
200 * is unsupported on the current process
201 */
202 void
203 proc_vfork_end(proc_t parent_proc)
204 {
205 proc_lock(parent_proc);
206 parent_proc->p_vforkcnt--;
207 if (parent_proc->p_vforkcnt < 0)
208 panic("vfork cnt is -ve");
209 if (parent_proc->p_vforkcnt == 0)
210 parent_proc->p_lflag &= ~P_LVFORK;
211 proc_unlock(parent_proc);
212 }
213
214
215 /*
216 * vfork
217 *
218 * Description: vfork system call
219 *
220 * Parameters: void [no arguments]
221 *
222 * Retval: 0 (to child process)
223 * !0 pid of child (to parent process)
224 * -1 error (see "Returns:")
225 *
226 * Returns: EAGAIN Administrative limit reached
227 * EINVAL vfork() called during vfork()
228 * ENOMEM Failed to allocate new process
229 *
230 * Note: After a successful call to this function, the parent process
231 * has its task, thread, and uthread lent to the child process,
232 * and control is returned to the caller; if this function is
233 * invoked as a system call, the return is to user space, and
234 * is effectively running on the child process.
235 *
236 * Subsequent calls that operate on process state are permitted,
237 * though discouraged, and will operate on the child process; any
238 * operations on the task, thread, or uthread will result in
239 * changes in the parent state, and, if inheritable, the child
240 * state, when a task, thread, and uthread are realized for the
241 * child process at execve() time, will also be effected. Given
242 * this, it's recemmended that people use the posix_spawn() call
243 * instead.
244 *
245 * BLOCK DIAGRAM OF VFORK
246 *
247 * Before:
248 *
249 * ,----------------. ,-------------.
250 * | | task | |
251 * | parent_thread | ------> | parent_task |
252 * | | <.list. | |
253 * `----------------' `-------------'
254 * uthread | ^ bsd_info | ^
255 * v | vc_thread v | task
256 * ,----------------. ,-------------.
257 * | | | |
258 * | parent_uthread | <.list. | parent_proc | <-- current_proc()
259 * | | | |
260 * `----------------' `-------------'
261 * uu_proc |
262 * v
263 * NULL
264 *
265 * After:
266 *
267 * ,----------------. ,-------------.
268 * | | task | |
269 * ,----> | parent_thread | ------> | parent_task |
270 * | | | <.list. | |
271 * | `----------------' `-------------'
272 * | uthread | ^ bsd_info | ^
273 * | v | vc_thread v | task
274 * | ,----------------. ,-------------.
275 * | | | | |
276 * | | parent_uthread | <.list. | parent_proc |
277 * | | | | |
278 * | `----------------' `-------------'
279 * | uu_proc | . list
280 * | v v
281 * | ,----------------.
282 * `----- | |
283 * p_vforkact | child_proc | <-- current_proc()
284 * | |
285 * `----------------'
286 */
287 int
288 vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval)
289 {
290 thread_t child_thread;
291 int err;
292
293 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_VFORK, NULL)) != 0) {
294 retval[1] = 0;
295 } else {
296 uthread_t ut = get_bsdthread_info(current_thread());
297 proc_t child_proc = ut->uu_proc;
298
299 retval[0] = child_proc->p_pid;
300 retval[1] = 1; /* flag child return for user space */
301
302 /*
303 * Drop the signal lock on the child which was taken on our
304 * behalf by forkproc()/cloneproc() to prevent signals being
305 * received by the child in a partially constructed state.
306 */
307 proc_signalend(child_proc, 0);
308 proc_transend(child_proc, 0);
309
310 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
311 DTRACE_PROC1(create, proc_t, child_proc);
312 ut->uu_flag &= ~UT_VFORKING;
313 }
314
315 return (err);
316 }
317
318
319 /*
320 * fork1
321 *
322 * Description: common code used by all new process creation other than the
323 * bootstrap of the initial process on the system
324 *
325 * Parameters: parent_proc parent process of the process being
326 * child_threadp pointer to location to receive the
327 * Mach thread_t of the child process
328 * created
329 * kind kind of creation being requested
330 * coalitions if spawn, the set of coalitions the
331 * child process should join, or NULL to
332 * inherit the parent's. On non-spawns,
333 * this param is ignored and the child
334 * always inherits the parent's
335 * coalitions.
336 *
337 * Notes: Permissable values for 'kind':
338 *
339 * PROC_CREATE_FORK Create a complete process which will
340 * return actively running in both the
341 * parent and the child; the child copies
342 * the parent address space.
343 * PROC_CREATE_SPAWN Create a complete process which will
344 * return actively running in the parent
345 * only after returning actively running
346 * in the child; the child address space
347 * is newly created by an image activator,
348 * after which the child is run.
349 * PROC_CREATE_VFORK Creates a partial process which will
350 * borrow the parent task, thread, and
351 * uthread to return running in the child;
352 * the child address space and other parts
353 * are lazily created at execve() time, or
354 * the child is terminated, and the parent
355 * does not actively run until that
356 * happens.
357 *
358 * At first it may seem strange that we return the child thread
359 * address rather than process structure, since the process is
360 * the only part guaranteed to be "new"; however, since we do
361 * not actualy adjust other references between Mach and BSD (see
362 * the block diagram above the implementation of vfork()), this
363 * is the only method which guarantees us the ability to get
364 * back to the other information.
365 */
int
fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
{
	thread_t parent_thread = (thread_t)current_thread();
	uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread);
	proc_t child_proc = NULL;	/* set in switch, but compiler... */
	thread_t child_thread = NULL;
	uid_t uid;
	int count;
	int err = 0;
	int spawn = 0;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = kauth_getruid();
	proc_list_lock();
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		proc_list_unlock();
		tablefull("proc");
		return (EAGAIN);
	}
	proc_list_unlock();

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit, which is
	 * always less than what an rlim_t can hold.
	 * (locking protection is provided by list lock held in chgproccnt)
	 *
	 * NOTE: every error exit from this point on must go through 'bad:'
	 * so that this per-uid count is decremented again.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 &&
	    (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		err = EAGAIN;
		goto bad;
	}

#if CONFIG_MACF
	/*
	 * Determine if MAC policies applied to the process will allow
	 * it to fork.  This is an advisory-only check.
	 */
	err = mac_proc_check_fork(parent_proc);
	if (err != 0) {
		goto bad;
	}
#endif

	switch(kind) {
	case PROC_CREATE_VFORK:
		/*
		 * Prevent a vfork while we are in vfork(); we should
		 * also likely preventing a fork here as well, and this
		 * check should then be outside the switch statement,
		 * since the proc struct contents will copy from the
		 * child and the tash/thread/uthread from the parent in
		 * that case.  We do not support vfork() in vfork()
		 * because we don't have to; the same non-requirement
		 * is true of both fork() and posix_spawn() and any
		 * call other than execve() amd _exit(), but we've
		 * been historically lenient, so we continue to be so
		 * (for now).
		 *
		 * <rdar://6640521> Probably a source of random panics
		 */
		if (parent_uthread->uu_flag & UT_VFORK) {
			printf("fork1 called within vfork by %s\n", parent_proc->p_comm);
			err = EINVAL;
			goto bad;
		}

		/*
		 * Flag us in progress; if we chose to support vfork() in
		 * vfork(), we would chain our parent at this point (in
		 * effect, a stack push).  We don't, since we actually want
		 * to disallow everything not specified in the standard
		 */
		proc_vfork_begin(parent_proc);

		/* The newly created process comes with signal lock held */
		if ((child_proc = forkproc(parent_proc)) == NULL) {
			/* Failed to allocate new process */
			proc_vfork_end(parent_proc);
			err = ENOMEM;
			goto bad;
		}

// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 * association without informing the policy in other
		 * situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
#endif

		/*
		 * Propogate change of PID - may get new cred if auditing.
		 *
		 * NOTE: This has no effect in the vfork case, since
		 * child_proc->task != current_task(), but we duplicate it
		 * because this is probably, ultimately, wrong, since we
		 * will be running in the "child" which is the parent task
		 * with the wrong token until we get to the execve() or
		 * _exit() call; a lot of "undefined" can happen before
		 * that.
		 *
		 * <rdar://6640530> disallow everything but exeve()/_exit()?
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, child_proc->p_pid);

// XXX END: wants to move to be common code (and safe)

		/*
		 * BORROW PARENT TASK, THREAD, UTHREAD FOR CHILD
		 *
		 * Note: this is where we would "push" state instead of setting
		 * it for nested vfork() support (see proc_vfork_end() for
		 * description if issues here).
		 */
		child_proc->task = parent_proc->task;

		child_proc->p_lflag |= P_LINVFORK;
		child_proc->p_vforkact = parent_thread;
		child_proc->p_stat = SRUN;

		/*
		 * Until UT_VFORKING is cleared at the end of the vfork
		 * syscall, the process identity of this thread is slightly
		 * murky.
		 *
		 * As long as UT_VFORK and it's associated field (uu_proc)
		 * is set, current_proc() will always return the child process.
		 *
		 * However dtrace_proc_selfpid() returns the parent pid to
		 * ensure that e.g. the proc:::create probe actions accrue
		 * to the parent.  (Otherwise the child magically seems to
		 * have created itself!)
		 */
		parent_uthread->uu_flag |= UT_VFORK | UT_VFORKING;
		parent_uthread->uu_proc = child_proc;
		parent_uthread->uu_userstate = (void *)act_thread_csave();
		parent_uthread->uu_vforkmask = parent_uthread->uu_sigmask;

		/*
		 * temporarily drop thread-set-id state; restored (with the
		 * matching ~UT_WASSETUID clear) in vfork_return()
		 */
		if (parent_uthread->uu_flag & UT_SETUID) {
			parent_uthread->uu_flag |= UT_WASSETUID;
			parent_uthread->uu_flag &= ~UT_SETUID;
		}

		/* blow thread state information */
		/* XXX is this actually necessary, given syscall return? */
		thread_set_child(parent_thread, child_proc->p_pid);

		child_proc->p_acflag = AFORK;	/* forked but not exec'ed */

		/*
		 * Preserve synchronization semantics of vfork.  If
		 * waiting for child to exec or exit, set P_PPWAIT
		 * on child, and sleep on our proc (in case of exit).
		 */
		child_proc->p_lflag |= P_LPPWAIT;
		pinsertchild(parent_proc, child_proc);	/* set visible */

		break;

	case PROC_CREATE_SPAWN:
		/*
		 * A spawned process differs from a forked process in that
		 * the spawned process does not carry around the parents
		 * baggage with regard to address space copying, dtrace,
		 * and so on.
		 */
		spawn = 1;

		/* FALLSTHROUGH */

	case PROC_CREATE_FORK:
		/*
		 * When we clone the parent process, we are going to inherit
		 * its task attributes and memory, since when we fork, we
		 * will, in effect, create a duplicate of it, with only minor
		 * differences.  Contrarily, spawned processes do not inherit.
		 */
		if ((child_thread = cloneproc(parent_proc->task,
				spawn ? coalitions : NULL,
				parent_proc,
				spawn ? FALSE : TRUE,
				FALSE)) == NULL) {
			/* Failed to create thread */
			err = EAGAIN;
			goto bad;
		}

		/* copy current thread state into the child thread (only for fork) */
		if (!spawn) {
			thread_dup(child_thread);
		}

		/* child_proc = child_thread->task->proc; */
		child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));

// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 * association without informing the policy in other
		 * situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
#endif

		/*
		 * Propogate change of PID - may get new cred if auditing.
		 *
		 * NOTE: This has no effect in the vfork case, since
		 * child_proc->task != current_task(), but we duplicate it
		 * because this is probably, ultimately, wrong, since we
		 * will be running in the "child" which is the parent task
		 * with the wrong token until we get to the execve() or
		 * _exit() call; a lot of "undefined" can happen before
		 * that.
		 *
		 * <rdar://6640530> disallow everything but exeve()/_exit()?
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, child_proc->p_pid);

// XXX END: wants to move to be common code (and safe)

		/*
		 * Blow thread state information; this is what gives the child
		 * process its "return" value from a fork() call.
		 *
		 * Note: this should probably move to fork() proper, since it
		 * is not relevent to spawn, and the value won't matter
		 * until we resume the child there.  If you are in here
		 * refactoring code, consider doing this at the same time.
		 */
		thread_set_child(child_thread, child_proc->p_pid);

		child_proc->p_acflag = AFORK;	/* forked but not exec'ed */

#if CONFIG_DTRACE
		dtrace_proc_fork(parent_proc, child_proc, spawn);
#endif	/* CONFIG_DTRACE */
		if (!spawn) {
			/*
			 * Of note, we need to initialize the bank context behind
			 * the protection of the proc_trans lock to prevent a race with exit.
			 */
			task_bank_init(get_threadtask(child_thread));
		}

		break;

	default:
		panic("fork1 called with unknown kind %d", kind);
		break;
	}


	/* return the thread pointer to the caller */
	*child_threadp = child_thread;

bad:
	/*
	 * In the error case, we return a 0 value for the returned pid (but
	 * it is ignored in the trampoline due to the error return); this
	 * is probably not necessary.
	 */
	if (err) {
		(void)chgproccnt(uid, -1);
	}

	return (err);
}
655
656
657 /*
658 * vfork_return
659 *
660 * Description: "Return" to parent vfork thread() following execve/_exit;
661 * this is done by reassociating the parent process structure
662 * with the task, thread, and uthread.
663 *
664 * Refer to the ASCII art above vfork() to figure out the
665 * state we're undoing.
666 *
667 * Parameters: child_proc Child process
668 * retval System call return value array
669 * rval Return value to present to parent
670 *
671 * Returns: void
672 *
673 * Notes: The caller resumes or exits the parent, as appropriate, after
674 * calling this function.
675 */
676 void
677 vfork_return(proc_t child_proc, int32_t *retval, int rval)
678 {
679 task_t parent_task = get_threadtask(child_proc->p_vforkact);
680 proc_t parent_proc = get_bsdtask_info(parent_task);
681 thread_t th = current_thread();
682 uthread_t uth = get_bsdthread_info(th);
683
684 act_thread_catt(uth->uu_userstate);
685
686 /* clear vfork state in parent proc structure */
687 proc_vfork_end(parent_proc);
688
689 /* REPATRIATE PARENT TASK, THREAD, UTHREAD */
690 uth->uu_userstate = 0;
691 uth->uu_flag &= ~UT_VFORK;
692 /* restore thread-set-id state */
693 if (uth->uu_flag & UT_WASSETUID) {
694 uth->uu_flag |= UT_SETUID;
695 uth->uu_flag &= UT_WASSETUID;
696 }
697 uth->uu_proc = 0;
698 uth->uu_sigmask = uth->uu_vforkmask;
699
700 proc_lock(child_proc);
701 child_proc->p_lflag &= ~P_LINVFORK;
702 child_proc->p_vforkact = 0;
703 proc_unlock(child_proc);
704
705 thread_set_parent(th, rval);
706
707 if (retval) {
708 retval[0] = rval;
709 retval[1] = 0; /* mark parent */
710 }
711 }
712
713
714 /*
715 * fork_create_child
716 *
717 * Description: Common operations associated with the creation of a child
718 * process
719 *
720 * Parameters: parent_task parent task
721 * parent_coalitions parent's set of coalitions
722 * child_proc child process
723 * inherit_memory TRUE, if the parents address space is
724 * to be inherited by the child
725 * is64bit TRUE, if the child being created will
726 * be associated with a 64 bit process
727 * rather than a 32 bit process
728 * in_exec TRUE, if called from execve or posix spawn set exec
729 * FALSE, if called from fork or vfexec
730 *
731 * Note: This code is called in the fork() case, from the execve() call
732 * graph, if implementing an execve() following a vfork(), from
733 * the posix_spawn() call graph (which implicitly includes a
734 * vfork() equivalent call, and in the system bootstrap case.
735 *
736 * It creates a new task and thread (and as a side effect of the
737 * thread creation, a uthread) in the parent coalition set, which is
738 * then associated with the process 'child'. If the parent
739 * process address space is to be inherited, then a flag
740 * indicates that the newly created task should inherit this from
741 * the child task.
742 *
743 * As a special concession to bootstrapping the initial process
744 * in the system, it's possible for 'parent_task' to be TASK_NULL;
745 * in this case, 'inherit_memory' MUST be FALSE.
746 */
747 thread_t
748 fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit, int in_exec)
749 {
750 thread_t child_thread = NULL;
751 task_t child_task;
752 kern_return_t result;
753
754 /* Create a new task for the child process */
755 result = task_create_internal(parent_task,
756 parent_coalitions,
757 inherit_memory,
758 is64bit,
759 TF_LRETURNWAIT | TF_LRETURNWAITER, /* All created threads will wait in task_wait_to_return */
760 in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */
761 &child_task);
762 if (result != KERN_SUCCESS) {
763 printf("%s: task_create_internal failed. Code: %d\n",
764 __func__, result);
765 goto bad;
766 }
767
768 if (!in_exec) {
769 /*
770 * Set the child process task to the new task if not in exec,
771 * will set the task for exec case in proc_exec_switch_task after image activation.
772 */
773 child_proc->task = child_task;
774 }
775
776 /* Set child task process to child proc */
777 set_bsdtask_info(child_task, child_proc);
778
779 /* Propagate CPU limit timer from parent */
780 if (timerisset(&child_proc->p_rlim_cpu))
781 task_vtimer_set(child_task, TASK_VTIMER_RLIM);
782
783 /*
784 * Set child process BSD visible scheduler priority if nice value
785 * inherited from parent
786 */
787 if (child_proc->p_nice != 0)
788 resetpriority(child_proc);
789
790 /*
791 * Create a new thread for the child process
792 * The new thread is waiting on the event triggered by 'task_clear_return_wait'
793 */
794 result = thread_create_waiting(child_task,
795 (thread_continue_t)task_wait_to_return,
796 task_get_return_wait_event(child_task),
797 &child_thread);
798
799 if (result != KERN_SUCCESS) {
800 printf("%s: thread_create failed. Code: %d\n",
801 __func__, result);
802 task_deallocate(child_task);
803 child_task = NULL;
804 }
805
806 /*
807 * Tag thread as being the first thread in its task.
808 */
809 thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);
810
811 bad:
812 thread_yield_internal(1);
813
814 return(child_thread);
815 }
816
817
818 /*
819 * fork
820 *
821 * Description: fork system call.
822 *
823 * Parameters: parent Parent process to fork
824 * uap (void) [unused]
825 * retval Return value
826 *
827 * Returns: 0 Success
828 * EAGAIN Resource unavailable, try again
829 *
830 * Notes: Attempts to create a new child process which inherits state
831 * from the parent process. If successful, the call returns
832 * having created an initially suspended child process with an
833 * extra Mach task and thread reference, for which the thread
834 * is initially suspended. Until we resume the child process,
835 * it is not yet running.
836 *
837 * The return information to the child is contained in the
838 * thread state structure of the new child, and does not
839 * become visible to the child through a normal return process,
840 * since it never made the call into the kernel itself in the
841 * first place.
842 *
843 * After resuming the thread, this function returns directly to
844 * the parent process which invoked the fork() system call.
845 *
846 * Important: The child thread_resume occurs before the parent returns;
847 * depending on scheduling latency, this means that it is not
848 * deterministic as to whether the parent or child is scheduled
849 * to run first. It is entirely possible that the child could
850 * run to completion prior to the parent running.
851 */
852 int
853 fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
854 {
855 thread_t child_thread;
856 int err;
857
858 retval[1] = 0; /* flag parent return for user space */
859
860 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
861 task_t child_task;
862 proc_t child_proc;
863
864 /* Return to the parent */
865 child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
866 retval[0] = child_proc->p_pid;
867
868 /*
869 * Drop the signal lock on the child which was taken on our
870 * behalf by forkproc()/cloneproc() to prevent signals being
871 * received by the child in a partially constructed state.
872 */
873 proc_signalend(child_proc, 0);
874 proc_transend(child_proc, 0);
875
876 /* flag the fork has occurred */
877 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
878 DTRACE_PROC1(create, proc_t, child_proc);
879
880 #if CONFIG_DTRACE
881 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL)
882 (*dtrace_proc_waitfor_hook)(child_proc);
883 #endif
884
885 /* "Return" to the child */
886 task_clear_return_wait(get_threadtask(child_thread));
887
888 /* drop the extra references we got during the creation */
889 if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
890 task_deallocate(child_task);
891 }
892 thread_deallocate(child_thread);
893 }
894
895 return(err);
896 }
897
898
899 /*
900 * cloneproc
901 *
902 * Description: Create a new process from a specified process.
903 *
904 * Parameters: parent_task The parent task to be cloned, or
905 * TASK_NULL is task characteristics
906 * are not to be inherited
907 * be cloned, or TASK_NULL if the new
908 * task is not to inherit the VM
909 * characteristics of the parent
910 * parent_proc The parent process to be cloned
911 * inherit_memory True if the child is to inherit
912 * memory from the parent; if this is
913 * non-NULL, then the parent_task must
914 * also be non-NULL
915 * memstat_internal Whether to track the process in the
916 * jetsam priority list (if configured)
917 *
918 * Returns: !NULL pointer to new child thread
919 * NULL Failure (unspecified)
920 *
921 * Note: On return newly created child process has signal lock held
922 * to block delivery of signal to it if called with lock set.
923 * fork() code needs to explicity remove this lock before
924 * signals can be delivered
925 *
926 * In the case of bootstrap, this function can be called from
927 * bsd_utaskbootstrap() in order to bootstrap the first process;
928 * the net effect is to provide a uthread structure for the
929 * kernel process associated with the kernel task.
930 *
931 * XXX: Tristating using the value parent_task as the major key
932 * and inherit_memory as the minor key is something we should
933 * refactor later; we owe the current semantics, ultimately,
934 * to the semantics of task_create_internal. For now, we will
935 * live with this being somewhat awkward.
936 */
937 thread_t
938 cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
939 {
940 #if !CONFIG_MEMORYSTATUS
941 #pragma unused(memstat_internal)
942 #endif
943 task_t child_task;
944 proc_t child_proc;
945 thread_t child_thread = NULL;
946
947 if ((child_proc = forkproc(parent_proc)) == NULL) {
948 /* Failed to allocate new process */
949 goto bad;
950 }
951
952 child_thread = fork_create_child(parent_task, parent_coalitions, child_proc, inherit_memory, parent_proc->p_flag & P_LP64, FALSE);
953
954 if (child_thread == NULL) {
955 /*
956 * Failed to create thread; now we must deconstruct the new
957 * process previously obtained from forkproc().
958 */
959 forkproc_free(child_proc);
960 goto bad;
961 }
962
963 child_task = get_threadtask(child_thread);
964 if (parent_proc->p_flag & P_LP64) {
965 task_set_64bit(child_task, TRUE);
966 OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
967 } else {
968 task_set_64bit(child_task, FALSE);
969 OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
970 }
971
972 #if CONFIG_MEMORYSTATUS
973 if (memstat_internal) {
974 proc_list_lock();
975 child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
976 proc_list_unlock();
977 }
978 #endif
979
980 /* make child visible */
981 pinsertchild(parent_proc, child_proc);
982
983 /*
984 * Make child runnable, set start time.
985 */
986 child_proc->p_stat = SRUN;
987 bad:
988 return(child_thread);
989 }
990
991
992 /*
993 * Destroy a process structure that resulted from a call to forkproc(), but
994 * which must be returned to the system because of a subsequent failure
995 * preventing it from becoming active.
996 *
997 * Parameters: p The incomplete process from forkproc()
998 *
999 * Returns: (void)
1000 *
1001 * Note: This function should only be used in an error handler following
1002 * a call to forkproc().
1003 *
1004 * Operations occur in reverse order of those in forkproc().
1005 */
void
forkproc_free(proc_t p)
{
#if CONFIG_PERSONAS
	/* Drop the persona reference taken via persona_proc_inherit() */
	persona_proc_drop(p);
#endif /* CONFIG_PERSONAS */

#if PSYNCH
	/* Tear down the pthread-synch hash state set up by pth_proc_hashinit() */
	pth_proc_hashdelete(p);
#endif /* PSYNCH */

	/* We held signal and transition locks; drop them */
	proc_signalend(p, 0);
	proc_transend(p, 0);

	/*
	 * If we have our own copy of the resource limits structure, we
	 * need to free it. If it's a shared copy, we need to drop our
	 * reference on it.
	 */
	proc_limitdrop(p, 0);
	p->p_limit = NULL;

#if SYSV_SHM
	/* Need to drop references to the shared memory segment(s), if any */
	if (p->vm_shm) {
		/*
		 * Use shmexec(): we have no address space, so no mappings
		 *
		 * XXX Yes, the routine is badly named.
		 */
		shmexec(p);
	}
#endif

	/* Need to undo the effects of the fdcopy(), if any */
	fdfree(p);

	/*
	 * Drop the reference on a text vnode pointer, if any
	 * XXX This code is broken in forkproc(); see <rdar://4256419>;
	 * XXX if anyone ever uses this field, we will be extremely unhappy.
	 */
	if (p->p_textvp) {
		vnode_rele(p->p_textvp);
		p->p_textvp = NULL;
	}

	/* Stop the profiling clock */
	stopprofclock(p);

	/* Update the audit session proc count */
	AUDIT_SESSION_PROCEXIT(p);

	/* Destroy the per-process locks initialized in forkproc() */
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&p->p_mlock, proc_mlock_grp);
	lck_mtx_destroy(&p->p_fdmlock, proc_fdmlock_grp);
	lck_mtx_destroy(&p->p_ucred_mlock, proc_ucred_mlock_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&p->p_slock, proc_slock_grp);
#else /* CONFIG_FINE_LOCK_GROUPS */
	lck_mtx_destroy(&p->p_mlock, proc_lck_grp);
	lck_mtx_destroy(&p->p_fdmlock, proc_lck_grp);
	lck_mtx_destroy(&p->p_ucred_mlock, proc_lck_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&p->p_slock, proc_lck_grp);
#endif /* CONFIG_FINE_LOCK_GROUPS */

	/* Release the credential reference */
	kauth_cred_unref(&p->p_ucred);

	proc_list_lock();
	/* Decrement the count of processes in the system */
	nprocs--;

	/* Take it out of process hash */
	LIST_REMOVE(p, p_hash);

	proc_list_unlock();

	/* Free the interval-timer callout allocated in forkproc() */
	thread_call_free(p->p_rcall);

	/* Free allocated memory */
	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
	proc_checkdeadrefs(p);
	FREE_ZONE(p, sizeof *p, M_PROC);
}
1098
1099
1100 /*
1101 * forkproc
1102 *
1103 * Description: Create a new process structure, given a parent process
1104 * structure.
1105 *
1106 * Parameters: parent_proc The parent process
1107 *
1108 * Returns: !NULL The new process structure
1109 * NULL Error (insufficient free memory)
1110 *
1111 * Note: When successful, the newly created process structure is
1112 * partially initialized; if a caller needs to deconstruct the
1113 * returned structure, they must call forkproc_free() to do so.
1114 */
1115 proc_t
1116 forkproc(proc_t parent_proc)
1117 {
1118 proc_t child_proc; /* Our new process */
1119 static int nextpid = 0, pidwrap = 0, nextpidversion = 0;
1120 static uint64_t nextuniqueid = 0;
1121 int error = 0;
1122 struct session *sessp;
1123 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());
1124
1125 MALLOC_ZONE(child_proc, proc_t , sizeof *child_proc, M_PROC, M_WAITOK);
1126 if (child_proc == NULL) {
1127 printf("forkproc: M_PROC zone exhausted\n");
1128 goto bad;
1129 }
1130 /* zero it out as we need to insert in hash */
1131 bzero(child_proc, sizeof *child_proc);
1132
1133 MALLOC_ZONE(child_proc->p_stats, struct pstats *,
1134 sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK);
1135 if (child_proc->p_stats == NULL) {
1136 printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n");
1137 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1138 child_proc = NULL;
1139 goto bad;
1140 }
1141 MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *,
1142 sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK);
1143 if (child_proc->p_sigacts == NULL) {
1144 printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
1145 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1146 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1147 child_proc = NULL;
1148 goto bad;
1149 }
1150
1151 /* allocate a callout for use by interval timers */
1152 child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
1153 if (child_proc->p_rcall == NULL) {
1154 FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
1155 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1156 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1157 child_proc = NULL;
1158 goto bad;
1159 }
1160
1161
1162 /*
1163 * Find an unused PID.
1164 */
1165
1166 proc_list_lock();
1167
1168 nextpid++;
1169 retry:
1170 /*
1171 * If the process ID prototype has wrapped around,
1172 * restart somewhat above 0, as the low-numbered procs
1173 * tend to include daemons that don't exit.
1174 */
1175 if (nextpid >= PID_MAX) {
1176 nextpid = 100;
1177 pidwrap = 1;
1178 }
1179 if (pidwrap != 0) {
1180
1181 /* if the pid stays in hash both for zombie and runniing state */
1182 if (pfind_locked(nextpid) != PROC_NULL) {
1183 nextpid++;
1184 goto retry;
1185 }
1186
1187 if (pgfind_internal(nextpid) != PGRP_NULL) {
1188 nextpid++;
1189 goto retry;
1190 }
1191 if (session_find_internal(nextpid) != SESSION_NULL) {
1192 nextpid++;
1193 goto retry;
1194 }
1195 }
1196 nprocs++;
1197 child_proc->p_pid = nextpid;
1198 child_proc->p_responsible_pid = nextpid; /* initially responsible for self */
1199 child_proc->p_idversion = nextpidversion++;
1200 /* kernel process is handcrafted and not from fork, so start from 1 */
1201 child_proc->p_uniqueid = ++nextuniqueid;
1202 #if 1
1203 if (child_proc->p_pid != 0) {
1204 if (pfind_locked(child_proc->p_pid) != PROC_NULL)
1205 panic("proc in the list already\n");
1206 }
1207 #endif
1208 /* Insert in the hash */
1209 child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE);
1210 LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
1211 proc_list_unlock();
1212
1213
1214 /*
1215 * We've identified the PID we are going to use; initialize the new
1216 * process structure.
1217 */
1218 child_proc->p_stat = SIDL;
1219 child_proc->p_pgrpid = PGRPID_DEAD;
1220
1221 /*
1222 * The zero'ing of the proc was at the allocation time due to need
1223 * for insertion to hash. Copy the section that is to be copied
1224 * directly from the parent.
1225 */
1226 bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
1227 (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy));
1228
1229 /*
1230 * Some flags are inherited from the parent.
1231 * Duplicate sub-structures as needed.
1232 * Increase reference counts on shared objects.
1233 * The p_stats and p_sigacts substructs are set in vm_fork.
1234 */
1235 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
1236 if (parent_proc->p_flag & P_PROFIL)
1237 startprofclock(child_proc);
1238
1239 child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY));
1240
1241 /*
1242 * Note that if the current thread has an assumed identity, this
1243 * credential will be granted to the new process.
1244 */
1245 child_proc->p_ucred = kauth_cred_get_with_ref();
1246 /* update cred on proc */
1247 PROC_UPDATE_CREDS_ONPROC(child_proc);
1248 /* update audit session proc count */
1249 AUDIT_SESSION_PROCNEW(child_proc);
1250
1251 #if CONFIG_FINE_LOCK_GROUPS
1252 lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr);
1253 lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
1254 lck_mtx_init(&child_proc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
1255 #if CONFIG_DTRACE
1256 lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
1257 #endif
1258 lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr);
1259 #else /* !CONFIG_FINE_LOCK_GROUPS */
1260 lck_mtx_init(&child_proc->p_mlock, proc_lck_grp, proc_lck_attr);
1261 lck_mtx_init(&child_proc->p_fdmlock, proc_lck_grp, proc_lck_attr);
1262 lck_mtx_init(&child_proc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
1263 #if CONFIG_DTRACE
1264 lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
1265 #endif
1266 lck_spin_init(&child_proc->p_slock, proc_lck_grp, proc_lck_attr);
1267 #endif /* !CONFIG_FINE_LOCK_GROUPS */
1268 klist_init(&child_proc->p_klist);
1269
1270 if (child_proc->p_textvp != NULLVP) {
1271 /* bump references to the text vnode */
1272 /* Need to hold iocount across the ref call */
1273 if (vnode_getwithref(child_proc->p_textvp) == 0) {
1274 error = vnode_ref(child_proc->p_textvp);
1275 vnode_put(child_proc->p_textvp);
1276 if (error != 0)
1277 child_proc->p_textvp = NULLVP;
1278 }
1279 }
1280
1281 /*
1282 * Copy the parents per process open file table to the child; if
1283 * there is a per-thread current working directory, set the childs
1284 * per-process current working directory to that instead of the
1285 * parents.
1286 *
1287 * XXX may fail to copy descriptors to child
1288 */
1289 child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);
1290
1291 #if SYSV_SHM
1292 if (parent_proc->vm_shm) {
1293 /* XXX may fail to attach shm to child */
1294 (void)shmfork(parent_proc, child_proc);
1295 }
1296 #endif
1297 /*
1298 * inherit the limit structure to child
1299 */
1300 proc_limitfork(parent_proc, child_proc);
1301
1302 if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1303 uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur;
1304 child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur;
1305 }
1306
1307 /* Intialize new process stats, including start time */
1308 /* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
1309 bzero(child_proc->p_stats, sizeof(*child_proc->p_stats));
1310 microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);
1311
1312 if (parent_proc->p_sigacts != NULL)
1313 (void)memcpy(child_proc->p_sigacts,
1314 parent_proc->p_sigacts, sizeof *child_proc->p_sigacts);
1315 else
1316 (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts);
1317
1318 sessp = proc_session(parent_proc);
1319 if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT)
1320 OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag);
1321 session_rele(sessp);
1322
1323 /*
1324 * block all signals to reach the process.
1325 * no transition race should be occuring with the child yet,
1326 * but indicate that the process is in (the creation) transition.
1327 */
1328 proc_signalstart(child_proc, 0);
1329 proc_transstart(child_proc, 0, 0);
1330
1331 child_proc->p_pcaction = 0;
1332
1333 TAILQ_INIT(&child_proc->p_uthlist);
1334 TAILQ_INIT(&child_proc->p_aio_activeq);
1335 TAILQ_INIT(&child_proc->p_aio_doneq);
1336
1337 /* Inherit the parent flags for code sign */
1338 child_proc->p_csflags = (parent_proc->p_csflags & ~CS_KILLED);
1339
1340 /*
1341 * Copy work queue information
1342 *
1343 * Note: This should probably only happen in the case where we are
1344 * creating a child that is a copy of the parent; since this
1345 * routine is called in the non-duplication case of vfork()
1346 * or posix_spawn(), then this information should likely not
1347 * be duplicated.
1348 *
1349 * <rdar://6640553> Work queue pointers that no longer point to code
1350 */
1351 child_proc->p_wqthread = parent_proc->p_wqthread;
1352 child_proc->p_threadstart = parent_proc->p_threadstart;
1353 child_proc->p_pthsize = parent_proc->p_pthsize;
1354 if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
1355 child_proc->p_lflag |= P_LREGISTER;
1356 }
1357 child_proc->p_wqkqueue = NULL;
1358 child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
1359 child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
1360 #if PSYNCH
1361 pth_proc_hashinit(child_proc);
1362 #endif /* PSYNCH */
1363
1364 #if CONFIG_PERSONAS
1365 child_proc->p_persona = NULL;
1366 error = persona_proc_inherit(child_proc, parent_proc);
1367 if (error != 0) {
1368 printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
1369 forkproc_free(child_proc);
1370 child_proc = NULL;
1371 goto bad;
1372 }
1373 #endif
1374
1375 #if CONFIG_MEMORYSTATUS
1376 /* Memorystatus init */
1377 child_proc->p_memstat_state = 0;
1378 child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
1379 child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
1380 child_proc->p_memstat_userdata = 0;
1381 child_proc->p_memstat_idle_start = 0;
1382 child_proc->p_memstat_idle_delta = 0;
1383 child_proc->p_memstat_memlimit = 0;
1384 child_proc->p_memstat_memlimit_active = 0;
1385 child_proc->p_memstat_memlimit_inactive = 0;
1386 #if CONFIG_FREEZE
1387 child_proc->p_memstat_suspendedfootprint = 0;
1388 #endif
1389 child_proc->p_memstat_dirty = 0;
1390 child_proc->p_memstat_idledeadline = 0;
1391 #endif /* CONFIG_MEMORYSTATUS */
1392
1393 bad:
1394 return(child_proc);
1395 }
1396
void
proc_lock(proc_t p)
{
	/*
	 * Take the per-process mutex.  The caller must not already hold
	 * the global proc list lock (asserted below).
	 */
	lck_mtx_assert(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(&p->p_mlock);
}
1403
void
proc_unlock(proc_t p)
{
	/* Release the per-process mutex taken by proc_lock() */
	lck_mtx_unlock(&p->p_mlock);
}
1409
void
proc_spinlock(proc_t p)
{
	/* Take the per-process spin lock */
	lck_spin_lock(&p->p_slock);
}
1415
void
proc_spinunlock(proc_t p)
{
	/* Release the per-process spin lock taken by proc_spinlock() */
	lck_spin_unlock(&p->p_slock);
}
1421
void
proc_list_lock(void)
{
	/* Take the global process list lock */
	lck_mtx_lock(proc_list_mlock);
}
1427
void
proc_list_unlock(void)
{
	/* Release the global process list lock */
	lck_mtx_unlock(proc_list_mlock);
}
1433
void
proc_ucred_lock(proc_t p)
{
	/* Take the per-process ucred mutex */
	lck_mtx_lock(&p->p_ucred_mlock);
}
1439
void
proc_ucred_unlock(proc_t p)
{
	/* Release the per-process ucred mutex taken by proc_ucred_lock() */
	lck_mtx_unlock(&p->p_ucred_mlock);
}
1445
1446 #include <kern/zalloc.h>
1447
1448 struct zone *uthread_zone = NULL;
1449
1450 static lck_grp_t *rethrottle_lock_grp;
1451 static lck_attr_t *rethrottle_lock_attr;
1452 static lck_grp_attr_t *rethrottle_lock_grp_attr;
1453
1454 static void
1455 uthread_zone_init(void)
1456 {
1457 assert(uthread_zone == NULL);
1458
1459 rethrottle_lock_grp_attr = lck_grp_attr_alloc_init();
1460 rethrottle_lock_grp = lck_grp_alloc_init("rethrottle", rethrottle_lock_grp_attr);
1461 rethrottle_lock_attr = lck_attr_alloc_init();
1462
1463 uthread_zone = zinit(sizeof(struct uthread),
1464 thread_max * sizeof(struct uthread),
1465 THREAD_CHUNK * sizeof(struct uthread),
1466 "uthreads");
1467 }
1468
/*
 * uthread_alloc
 *
 * Allocate and initialize the BSD uthread structure for a new thread in
 * 'task'.  Unless 'noinherit' is set (workqueue threads), the new thread
 * inherits a credential and signal mask from the creating thread.
 * Returns the new uthread as an opaque pointer.
 */
void *
uthread_alloc(task_t task, thread_t thread, int noinherit)
{
	proc_t p;
	uthread_t uth;
	uthread_t uth_parent;
	void *ut;

	/* Lazily initialize the uthread zone on first use */
	if (uthread_zone == NULL)
		uthread_zone_init();

	ut = (void *)zalloc(uthread_zone);
	bzero(ut, sizeof(struct uthread));

	p = (proc_t) get_bsdtask_info(task);
	uth = (uthread_t)ut;
	uth->uu_thread = thread;

	lck_spin_init(&uth->uu_rethrottle_lock, rethrottle_lock_grp,
		      rethrottle_lock_attr);

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL. If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	uth_parent = (uthread_t)get_bsdthread_info(current_thread());
	if ((noinherit == 0) && task == current_task() &&
	    uth_parent != NULL &&
	    IS_VALID_CRED(uth_parent->uu_ucred)) {
		/*
		 * XXX The new thread is, in theory, being created in context
		 * XXX of parent thread, so a direct reference to the parent
		 * XXX is OK.
		 */
		kauth_cred_ref(uth_parent->uu_ucred);
		uth->uu_ucred = uth_parent->uu_ucred;
		/* the credential we just inherited is an assumed credential */
		if (uth_parent->uu_flag & UT_SETUID)
			uth->uu_flag |= UT_SETUID;
	} else {
		/* sometimes workqueue threads are created out of task context */
		if ((task != kernel_task) && (p != PROC_NULL))
			uth->uu_ucred = kauth_cred_proc_ref(p);
		else
			uth->uu_ucred = NOCRED;
	}


	if ((task != kernel_task) && p) {

		proc_lock(p);
		if (noinherit != 0) {
			/* workq threads will not inherit masks */
			uth->uu_sigmask = ~workq_threadmask;
		} else if (uth_parent) {
			/* inherit the (possibly saved) signal mask from the parent */
			if (uth_parent->uu_flag & UT_SAS_OLDMASK)
				uth->uu_sigmask = uth_parent->uu_oldmask;
			else
				uth->uu_sigmask = uth_parent->uu_sigmask;
		}
		uth->uu_context.vc_thread = thread;
		/*
		 * Do not add the uthread to proc uthlist for exec copy task,
		 * since they do not hold a ref on proc.
		 */
		if (!task_is_exec_copy(task)) {
			TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		}
		proc_unlock(p);

#if CONFIG_DTRACE
		/* Claim a DTrace per-thread scratch space entry if configured */
		if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
			uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
		}
#endif
	}

	return (ut);
}
1552
1553 /*
1554 * This routine frees the thread name field of the uthread_t structure. Split out of
1555 * uthread_cleanup() so thread name does not get deallocated while generating a corpse fork.
1556 */
1557 void
1558 uthread_cleanup_name(void *uthread)
1559 {
1560 uthread_t uth = (uthread_t)uthread;
1561
1562 /*
1563 * <rdar://17834538>
1564 * Set pth_name to NULL before calling free().
1565 * Previously there was a race condition in the
1566 * case this code was executing during a stackshot
1567 * where the stackshot could try and copy pth_name
1568 * after it had been freed and before if was marked
1569 * as null.
1570 */
1571 if (uth->pth_name != NULL) {
1572 void *pth_name = uth->pth_name;
1573 uth->pth_name = NULL;
1574 kfree(pth_name, MAXTHREADNAMESIZE);
1575 }
1576 return;
1577 }
1578
1579 /*
1580 * This routine frees all the BSD context in uthread except the credential.
1581 * It does not free the uthread structure as well
1582 */
void
uthread_cleanup(task_t task, void *uthread, void * bsd_info)
{
	struct _select *sel;
	uthread_t uth = (uthread_t)uthread;
	proc_t p = (proc_t)bsd_info;

#if PROC_REF_DEBUG
	/* A live proc refcount here means a ref was leaked by this thread */
	if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
		panic("uthread_cleanup called for uthread %p with uu_proc_refcount != 0", uthread);
	}
#endif

	if (uth->uu_lowpri_window || uth->uu_throttle_info) {
		/*
		 * task is marked as a low priority I/O type
		 * and we've somehow managed to not dismiss the throttle
		 * through the normal exit paths back to user space...
		 * no need to throttle this thread since its going away
		 * but we do need to update our bookeeping w/r to throttled threads
		 *
		 * Calling this routine will clean up any throttle info reference
		 * still inuse by the thread.
		 */
		throttle_lowpri_io(0);
	}
	/*
	 * Per-thread audit state should never last beyond system
	 * call return. Since we don't audit the thread creation/
	 * removal, the thread state pointer should never be
	 * non-NULL when we get here.
	 */
	assert(uth->uu_ar == NULL);

	/* Unbind this thread from any kqueue it is servicing */
	if (uth->uu_kqueue_bound) {
		kevent_qos_internal_unbind(p,
		                           uth->uu_kqueue_bound,
		                           uth->uu_thread,
		                           uth->uu_kqueue_flags);
		uth->uu_kqueue_flags = 0;
		uth->uu_kqueue_bound = 0;
	}

	sel = &uth->uu_select;
	/* cleanup the select bit space */
	if (sel->nbytes) {
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
		sel->nbytes = 0;
	}

	/* Drop the per-thread current working directory, if set */
	if (uth->uu_cdir) {
		vnode_rele(uth->uu_cdir);
		uth->uu_cdir = NULLVP;
	}

	/* Tear down and free the thread's waitq set, if allocated */
	if (uth->uu_wqset) {
		if (waitq_set_is_valid(uth->uu_wqset))
			waitq_set_deinit(uth->uu_wqset);
		FREE(uth->uu_wqset, M_SELECT);
		uth->uu_wqset = NULL;
		uth->uu_wqstate_sz = 0;
	}

	/* Release any pending exit reason */
	os_reason_free(uth->uu_exit_reason);

	if ((task != kernel_task) && p) {

		/* A vfork child proc still attached to this thread must exit first */
		if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
			vfork_exit_internal(uth->uu_proc, 0, 1);
		}
		/*
		 * Remove the thread from the process list and
		 * transfer [appropriate] pending signals to the process.
		 * Do not remove the uthread from proc uthlist for exec
		 * copy task, since it does not hold a ref on proc and
		 * would not have been added to the list.
		 */
		if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
			proc_lock(p);

			TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
			p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
			proc_unlock(p);
		}
#if CONFIG_DTRACE
		/* Return the DTrace per-thread scratch entry, if one was claimed */
		struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
		uth->t_dtrace_scratch = NULL;
		if (tmpptr != NULL && !task_is_exec_copy(task)) {
			dtrace_ptss_release_entry(p, tmpptr);
		}
#endif
	}
}
1677
1678 /* This routine releases the credential stored in uthread */
1679 void
1680 uthread_cred_free(void *uthread)
1681 {
1682 uthread_t uth = (uthread_t)uthread;
1683
1684 /* and free the uthread itself */
1685 if (IS_VALID_CRED(uth->uu_ucred)) {
1686 kauth_cred_t oldcred = uth->uu_ucred;
1687 uth->uu_ucred = NOCRED;
1688 kauth_cred_unref(&oldcred);
1689 }
1690 }
1691
1692 /* This routine frees the uthread structure held in thread structure */
1693 void
1694 uthread_zone_free(void *uthread)
1695 {
1696 uthread_t uth = (uthread_t)uthread;
1697
1698 if (uth->t_tombstone) {
1699 kfree(uth->t_tombstone, sizeof(struct doc_tombstone));
1700 uth->t_tombstone = NULL;
1701 }
1702
1703 lck_spin_destroy(&uth->uu_rethrottle_lock, rethrottle_lock_grp);
1704
1705 uthread_cleanup_name(uthread);
1706 /* and free the uthread itself */
1707 zfree(uthread_zone, uthread);
1708 }