1 /*
2 * Copyright (c) 2000-2007, 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74 /*
75 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
76 * support for mandatory and extensible security protections. This notice
77 * is included in support of clause 2.2 (b) of the Apple Public License,
78 * Version 2.0.
79 */
80
81 #include <kern/assert.h>
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/filedesc.h>
85 #include <sys/kernel.h>
86 #include <sys/malloc.h>
87 #include <sys/proc_internal.h>
88 #include <sys/kauth.h>
89 #include <sys/user.h>
90 #include <sys/resourcevar.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/file_internal.h>
93 #include <sys/acct.h>
94 #include <sys/codesign.h>
95 #include <sys/sysproto.h>
96 #if CONFIG_PERSONAS
97 #include <sys/persona.h>
98 #endif
99 #if CONFIG_DTRACE
100 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
101 extern void dtrace_fasttrap_fork(proc_t, proc_t);
102 extern void (*dtrace_helpers_fork)(proc_t, proc_t);
103 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
104 extern void dtrace_lazy_dofs_duplicate(proc_t, proc_t);
105
106 /*
107 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
108 * we will store its value before actually calling it.
109 */
110 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
111
112 #include <sys/dtrace_ptss.h>
113 #endif
114
115 #include <security/audit/audit.h>
116
117 #include <mach/mach_types.h>
118 #include <kern/coalition.h>
119 #include <kern/kern_types.h>
120 #include <kern/kalloc.h>
121 #include <kern/mach_param.h>
122 #include <kern/task.h>
123 #include <kern/thread.h>
124 #include <kern/thread_call.h>
125 #include <kern/zalloc.h>
126
127 #include <machine/spl.h>
128
129 #if CONFIG_MACF
130 #include <security/mac.h>
131 #include <security/mac_mach_internal.h>
132 #endif
133
134 #include <vm/vm_map.h>
135 #include <vm/vm_protos.h>
136 #include <vm/vm_shared_region.h>
137
138 #include <sys/shm_internal.h> /* for shmfork() */
139 #include <mach/task.h> /* for thread_create() */
140 #include <mach/thread_act.h> /* for thread_resume() */
141
142 #include <sys/sdt.h>
143
144 #if CONFIG_MEMORYSTATUS
145 #include <sys/kern_memorystatus.h>
146 #endif
147
148 /* XXX routines which should have Mach prototypes, but don't */
149 void thread_set_parent(thread_t parent, int pid);
150 extern void act_thread_catt(void *ctx);
151 void thread_set_child(thread_t child, int pid);
152 void *act_thread_csave(void);
153
154
155 thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
156 proc_t forkproc(proc_t);
157 void forkproc_free(proc_t);
158 thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child, int inherit_memory, int is64bit);
159 void proc_vfork_begin(proc_t parent_proc);
160 void proc_vfork_end(proc_t parent_proc);
161
162 #define DOFORK 0x1 /* fork() system call */
163 #define DOVFORK 0x2 /* vfork() system call */
164
165 /*
166 * proc_vfork_begin
167 *
168 * Description: start a vfork on a process
169 *
170 * Parameters: parent_proc process (re)entering vfork state
171 *
172 * Returns: (void)
173 *
174 * Notes: Although this function increments a count, a count in
175 * excess of 1 is not currently supported. According to the
176 * POSIX standard, calling anything other than execve() or
177 * _exit() following a vfork(), including calling vfork()
178 * itself again, will result in undefined behaviour
179 */
180 void
181 proc_vfork_begin(proc_t parent_proc)
182 {
183 proc_lock(parent_proc);
184 parent_proc->p_lflag |= P_LVFORK;
185 parent_proc->p_vforkcnt++;
186 proc_unlock(parent_proc);
187 }
188
189 /*
190 * proc_vfork_end
191 *
192 * Description: stop a vfork on a process
193 *
194 * Parameters: parent_proc process leaving vfork state
195 *
196 * Returns: (void)
197 *
198 * Notes: Decrements the count; currently, reentrancy of vfork()
199 * is unsupported on the current process
200 */
201 void
202 proc_vfork_end(proc_t parent_proc)
203 {
204 proc_lock(parent_proc);
205 parent_proc->p_vforkcnt--;
206 if (parent_proc->p_vforkcnt < 0)
207 panic("vfork cnt is -ve");
208 if (parent_proc->p_vforkcnt == 0)
209 parent_proc->p_lflag &= ~P_LVFORK;
210 proc_unlock(parent_proc);
211 }
212
213
214 /*
215 * vfork
216 *
217 * Description: vfork system call
218 *
219 * Parameters: void [no arguments]
220 *
221 * Retval: 0 (to child process)
222 * !0 pid of child (to parent process)
223 * -1 error (see "Returns:")
224 *
225 * Returns: EAGAIN Administrative limit reached
226 * EINVAL vfork() called during vfork()
227 * ENOMEM Failed to allocate new process
228 *
229 * Note: After a successful call to this function, the parent process
230 * has its task, thread, and uthread lent to the child process,
231 * and control is returned to the caller; if this function is
232 * invoked as a system call, the return is to user space, and
233 * is effectively running on the child process.
234 *
235 * Subsequent calls that operate on process state are permitted,
236 * though discouraged, and will operate on the child process; any
237 * operations on the task, thread, or uthread will result in
238 * changes to the parent state and, if inheritable, will also
239 * affect the child state once a task, thread, and uthread are
240 * realized for the child process at execve() time. Given this,
241 * it's recommended that people use the posix_spawn() call
242 * instead.
243 *
244 * BLOCK DIAGRAM OF VFORK
245 *
246 * Before:
247 *
248 * ,----------------. ,-------------.
249 * | | task | |
250 * | parent_thread | ------> | parent_task |
251 * | | <.list. | |
252 * `----------------' `-------------'
253 * uthread | ^ bsd_info | ^
254 * v | vc_thread v | task
255 * ,----------------. ,-------------.
256 * | | | |
257 * | parent_uthread | <.list. | parent_proc | <-- current_proc()
258 * | | | |
259 * `----------------' `-------------'
260 * uu_proc |
261 * v
262 * NULL
263 *
264 * After:
265 *
266 * ,----------------. ,-------------.
267 * | | task | |
268 * ,----> | parent_thread | ------> | parent_task |
269 * | | | <.list. | |
270 * | `----------------' `-------------'
271 * | uthread | ^ bsd_info | ^
272 * | v | vc_thread v | task
273 * | ,----------------. ,-------------.
274 * | | | | |
275 * | | parent_uthread | <.list. | parent_proc |
276 * | | | | |
277 * | `----------------' `-------------'
278 * | uu_proc | . list
279 * | v v
280 * | ,----------------.
281 * `----- | |
282 * p_vforkact | child_proc | <-- current_proc()
283 * | |
284 * `----------------'
285 */
286 int
287 vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval)
288 {
289 thread_t child_thread;
290 int err;
291
292 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_VFORK, NULL)) != 0) {
293 retval[1] = 0;
294 } else {
295 uthread_t ut = get_bsdthread_info(current_thread());
296 proc_t child_proc = ut->uu_proc;
297
298 retval[0] = child_proc->p_pid;
299 retval[1] = 1; /* flag child return for user space */
300
301 /*
302 * Drop the signal lock on the child which was taken on our
303 * behalf by forkproc()/cloneproc() to prevent signals being
304 * received by the child in a partially constructed state.
305 */
306 proc_signalend(child_proc, 0);
307 proc_transend(child_proc, 0);
308
309 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
310 DTRACE_PROC1(create, proc_t, child_proc);
311 ut->uu_flag &= ~UT_VFORKING;
312 }
313
314 return (err);
315 }
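
/*
 * Editorial note (not part of the original source): the notes above describe
 * the only portable user-space pattern for vfork(); the child borrows the
 * parent's task/thread/uthread and may only call execve() or _exit() before
 * the parent resumes. A minimal user-space sketch of that pattern, assuming
 * a POSIX environment, might look like:
 *
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		// child: runs on the parent's borrowed thread (see diagram above)
 *		execl("/bin/ls", "ls", (char *)NULL);
 *		_exit(127);			// reached only if execl() failed
 *	} else if (pid > 0) {
 *		int status;
 *		(void)waitpid(pid, &status, 0);	// parent resumes once the child execs or exits
 *	}
 *
 * As the notes recommend, posix_spawn(2) is the preferred replacement for
 * the vfork()/execve() idiom.
 */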
316
317
318 /*
319 * fork1
320 *
321 * Description: common code used by all new process creation other than the
322 * bootstrap of the initial process on the system
323 *
324 * Parameters: parent_proc parent process of the process being
325 * created
326 * child_threadp pointer to location to receive the
327 * Mach thread_t of the child process created
328 * kind kind of creation being requested
329 * coalitions if spawn, the set of coalitions the
330 * child process should join, or NULL to
331 * inherit the parent's. On non-spawns,
332 * this param is ignored and the child
333 * always inherits the parent's
334 * coalitions.
335 *
336 * Notes: Permissible values for 'kind':
337 *
338 * PROC_CREATE_FORK Create a complete process which will
339 * return actively running in both the
340 * parent and the child; the child copies
341 * the parent address space.
342 * PROC_CREATE_SPAWN Create a complete process which will
343 * return actively running in the parent
344 * only after returning actively running
345 * in the child; the child address space
346 * is newly created by an image activator,
347 * after which the child is run.
348 * PROC_CREATE_VFORK Creates a partial process which will
349 * borrow the parent task, thread, and
350 * uthread to return running in the child;
351 * the child address space and other parts
352 * are lazily created at execve() time, or
353 * the child is terminated, and the parent
354 * does not actively run until that
355 * happens.
356 *
357 * At first it may seem strange that we return the child thread
358 * address rather than process structure, since the process is
359 * the only part guaranteed to be "new"; however, since we do
360 * not actually adjust other references between Mach and BSD (see
361 * the block diagram above the implementation of vfork()), this
362 * is the only method which guarantees us the ability to get
363 * back to the other information.
364 */
365 int
366 fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
367 {
368 thread_t parent_thread = (thread_t)current_thread();
369 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread);
370 proc_t child_proc = NULL; /* set in switch, but compiler... */
371 thread_t child_thread = NULL;
372 uid_t uid;
373 int count;
374 int err = 0;
375 int spawn = 0;
376
377 /*
378 * Although process entries are dynamically created, we still keep
379 * a global limit on the maximum number we will create. Don't allow
380 * a nonprivileged user to use the last process; don't let root
381 * exceed the limit. The variable nprocs is the current number of
382 * processes, maxproc is the limit.
383 */
384 uid = kauth_getruid();
385 proc_list_lock();
386 if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
387 proc_list_unlock();
388 tablefull("proc");
389 return (EAGAIN);
390 }
391 proc_list_unlock();
392
393 /*
394 * Increment the count of procs running with this uid. Don't allow
395 * a nonprivileged user to exceed their current limit, which is
396 * always less than what an rlim_t can hold.
397 * (locking protection is provided by list lock held in chgproccnt)
398 */
399 count = chgproccnt(uid, 1);
400 if (uid != 0 &&
401 (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
402 err = EAGAIN;
403 goto bad;
404 }
405
406 #if CONFIG_MACF
407 /*
408 * Determine if MAC policies applied to the process will allow
409 * it to fork. This is an advisory-only check.
410 */
411 err = mac_proc_check_fork(parent_proc);
412 if (err != 0) {
413 goto bad;
414 }
415 #endif
416
417 switch(kind) {
418 case PROC_CREATE_VFORK:
419 /*
420 * Prevent a vfork while we are in vfork(); we should
421 * also likely prevent a fork here as well, and this
422 * check should then be outside the switch statement,
423 * since the proc struct contents will copy from the
424 * child and the task/thread/uthread from the parent in
425 * that case. We do not support vfork() in vfork()
426 * because we don't have to; the same non-requirement
427 * is true of both fork() and posix_spawn() and any
428 * call other than execve() and _exit(), but we've
429 * been historically lenient, so we continue to be so
430 * (for now).
431 *
432 * <rdar://6640521> Probably a source of random panics
433 */
434 if (parent_uthread->uu_flag & UT_VFORK) {
435 printf("fork1 called within vfork by %s\n", parent_proc->p_comm);
436 err = EINVAL;
437 goto bad;
438 }
439
440 /*
441 * Flag us in progress; if we chose to support vfork() in
442 * vfork(), we would chain our parent at this point (in
443 * effect, a stack push). We don't, since we actually want
444 * to disallow everything not specified in the standard
445 */
446 proc_vfork_begin(parent_proc);
447
448 /* The newly created process comes with signal lock held */
449 if ((child_proc = forkproc(parent_proc)) == NULL) {
450 /* Failed to allocate new process */
451 proc_vfork_end(parent_proc);
452 err = ENOMEM;
453 goto bad;
454 }
455
456 // XXX BEGIN: wants to move to be common code (and safe)
457 #if CONFIG_MACF
458 /*
459 * allow policies to associate the credential/label that
460 * we referenced from the parent ... with the child
461 * JMM - this really isn't safe, as we can drop that
462 * association without informing the policy in other
463 * situations (keep long enough to get policies changed)
464 */
465 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
466 #endif
467
468 /*
469 * Propagate change of PID - may get new cred if auditing.
470 *
471 * NOTE: This has no effect in the vfork case, since
472 * child_proc->task != current_task(), but we duplicate it
473 * because this is probably, ultimately, wrong, since we
474 * will be running in the "child" which is the parent task
475 * with the wrong token until we get to the execve() or
476 * _exit() call; a lot of "undefined" can happen before
477 * that.
478 *
479 * <rdar://6640530> disallow everything but execve()/_exit()?
480 */
481 set_security_token(child_proc);
482
483 AUDIT_ARG(pid, child_proc->p_pid);
484
485 // XXX END: wants to move to be common code (and safe)
486
487 /*
488 * BORROW PARENT TASK, THREAD, UTHREAD FOR CHILD
489 *
490 * Note: this is where we would "push" state instead of setting
491 * it for nested vfork() support (see proc_vfork_end() for
492 * description if issues here).
493 */
494 child_proc->task = parent_proc->task;
495
496 child_proc->p_lflag |= P_LINVFORK;
497 child_proc->p_vforkact = parent_thread;
498 child_proc->p_stat = SRUN;
499
500 /*
501 * Until UT_VFORKING is cleared at the end of the vfork
502 * syscall, the process identity of this thread is slightly
503 * murky.
504 *
505 * As long as UT_VFORK and its associated field (uu_proc)
506 * are set, current_proc() will always return the child process.
507 *
508 * However dtrace_proc_selfpid() returns the parent pid to
509 * ensure that e.g. the proc:::create probe actions accrue
510 * to the parent. (Otherwise the child magically seems to
511 * have created itself!)
512 */
513 parent_uthread->uu_flag |= UT_VFORK | UT_VFORKING;
514 parent_uthread->uu_proc = child_proc;
515 parent_uthread->uu_userstate = (void *)act_thread_csave();
516 parent_uthread->uu_vforkmask = parent_uthread->uu_sigmask;
517
518 /* temporarily drop thread-set-id state */
519 if (parent_uthread->uu_flag & UT_SETUID) {
520 parent_uthread->uu_flag |= UT_WASSETUID;
521 parent_uthread->uu_flag &= ~UT_SETUID;
522 }
523
524 /* blow thread state information */
525 /* XXX is this actually necessary, given syscall return? */
526 thread_set_child(parent_thread, child_proc->p_pid);
527
528 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
529
530 /*
531 * Preserve synchronization semantics of vfork. If
532 * waiting for child to exec or exit, set P_PPWAIT
533 * on child, and sleep on our proc (in case of exit).
534 */
535 child_proc->p_lflag |= P_LPPWAIT;
536 pinsertchild(parent_proc, child_proc); /* set visible */
537
538 break;
539
540 case PROC_CREATE_SPAWN:
541 /*
542 * A spawned process differs from a forked process in that
543 * the spawned process does not carry around the parent's
544 * baggage with regard to address space copying, dtrace,
545 * and so on.
546 */
547 spawn = 1;
548
549 /* FALLTHROUGH */
550
551 case PROC_CREATE_FORK:
552 /*
553 * When we clone the parent process, we are going to inherit
554 * its task attributes and memory, since when we fork, we
555 * will, in effect, create a duplicate of it, with only minor
556 * differences. By contrast, spawned processes do not inherit.
557 */
558 if ((child_thread = cloneproc(parent_proc->task,
559 spawn ? coalitions : NULL,
560 parent_proc,
561 spawn ? FALSE : TRUE,
562 FALSE)) == NULL) {
563 /* Failed to create thread */
564 err = EAGAIN;
565 goto bad;
566 }
567
568 /* copy current thread state into the child thread (only for fork) */
569 if (!spawn) {
570 thread_dup(child_thread);
571 }
572
573 /* child_proc = child_thread->task->proc; */
574 child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));
575
576 // XXX BEGIN: wants to move to be common code (and safe)
577 #if CONFIG_MACF
578 /*
579 * allow policies to associate the credential/label that
580 * we referenced from the parent ... with the child
581 * JMM - this really isn't safe, as we can drop that
582 * association without informing the policy in other
583 * situations (keep long enough to get policies changed)
584 */
585 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
586 #endif
587
588 /*
589 * Propagate change of PID - may get new cred if auditing.
590 *
591 * NOTE: This has no effect in the vfork case, since
592 * child_proc->task != current_task(), but we duplicate it
593 * because this is probably, ultimately, wrong, since we
594 * will be running in the "child" which is the parent task
595 * with the wrong token until we get to the execve() or
596 * _exit() call; a lot of "undefined" can happen before
597 * that.
598 *
599 * <rdar://6640530> disallow everything but execve()/_exit()?
600 */
601 set_security_token(child_proc);
602
603 AUDIT_ARG(pid, child_proc->p_pid);
604
605 // XXX END: wants to move to be common code (and safe)
606
607 /*
608 * Blow thread state information; this is what gives the child
609 * process its "return" value from a fork() call.
610 *
611 * Note: this should probably move to fork() proper, since it
612 * is not relevant to spawn, and the value won't matter
613 * until we resume the child there. If you are in here
614 * refactoring code, consider doing this at the same time.
615 */
616 thread_set_child(child_thread, child_proc->p_pid);
617
618 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
619
620 // <rdar://6598155> dtrace code cleanup needed
621 #if CONFIG_DTRACE
622 /*
623 * This code applies to new processes who are copying the task
624 * and thread state and address spaces of their parent process.
625 */
626 if (!spawn) {
627 // <rdar://6598155> call dtrace specific function here instead of all this...
628 /*
629 * APPLE NOTE: Solaris does a sprlock() and drops the
630 * proc_lock here. We're cheating a bit and only taking
631 * the p_dtrace_sprlock lock. A full sprlock would
632 * task_suspend the parent.
633 */
634 lck_mtx_lock(&parent_proc->p_dtrace_sprlock);
635
636 /*
637 * Remove all DTrace tracepoints from the child process. We
638 * need to do this _before_ duplicating USDT providers since
639 * any associated probes may be immediately enabled.
640 */
641 if (parent_proc->p_dtrace_count > 0) {
642 dtrace_fasttrap_fork(parent_proc, child_proc);
643 }
644
645 lck_mtx_unlock(&parent_proc->p_dtrace_sprlock);
646
647 /*
648 * Duplicate any lazy dof(s). This must be done while NOT
649 * holding the parent sprlock! Lock ordering is
650 * dtrace_dof_mode_lock, then sprlock. It is imperative we
651 * always call dtrace_lazy_dofs_duplicate, rather than null
652 * check and call if !NULL. If we NULL test, during lazy dof
653 * faulting we can race with the faulting code and proceed
654 * from here to beyond the helpers copy. The lazy dof
655 * faulting will then fail to copy the helpers to the child
656 * process.
657 */
658 dtrace_lazy_dofs_duplicate(parent_proc, child_proc);
659
660 /*
661 * Duplicate any helper actions and providers. The SFORKING
662 * we set above informs the code that enables USDT probes that
663 * sprlock() may fail because the child is being forked.
664 */
665 /*
666 * APPLE NOTE: As best I can tell, Apple's sprlock() equivalent
667 * never fails to find the child. We do not set SFORKING.
668 */
669 if (parent_proc->p_dtrace_helpers != NULL && dtrace_helpers_fork) {
670 (*dtrace_helpers_fork)(parent_proc, child_proc);
671 }
672
673 }
674 #endif /* CONFIG_DTRACE */
675 if (!spawn) {
676 /*
677 * Of note, we need to initialize the bank context behind
678 * the protection of the proc_trans lock to prevent a race with exit.
679 */
680 task_bank_init(get_threadtask(child_thread));
681 }
682
683 break;
684
685 default:
686 panic("fork1 called with unknown kind %d", kind);
687 break;
688 }
689
690
691 /* return the thread pointer to the caller */
692 *child_threadp = child_thread;
693
694 bad:
695 /*
696 * In the error case, we return a 0 value for the returned pid (but
697 * it is ignored in the trampoline due to the error return); this
698 * is probably not necessary.
699 */
700 if (err) {
701 (void)chgproccnt(uid, -1);
702 }
703
704 return (err);
705 }
706
707
708 /*
709 * vfork_return
710 *
711 * Description: "Return" to parent vfork thread() following execve/_exit;
712 * this is done by reassociating the parent process structure
713 * with the task, thread, and uthread.
714 *
715 * Refer to the ASCII art above vfork() to figure out the
716 * state we're undoing.
717 *
718 * Parameters: child_proc Child process
719 * retval System call return value array
720 * rval Return value to present to parent
721 *
722 * Returns: void
723 *
724 * Notes: The caller resumes or exits the parent, as appropriate, after
725 * calling this function.
726 */
727 void
728 vfork_return(proc_t child_proc, int32_t *retval, int rval)
729 {
730 task_t parent_task = get_threadtask(child_proc->p_vforkact);
731 proc_t parent_proc = get_bsdtask_info(parent_task);
732 thread_t th = current_thread();
733 uthread_t uth = get_bsdthread_info(th);
734
735 act_thread_catt(uth->uu_userstate);
736
737 /* clear vfork state in parent proc structure */
738 proc_vfork_end(parent_proc);
739
740 /* REPATRIATE PARENT TASK, THREAD, UTHREAD */
741 uth->uu_userstate = 0;
742 uth->uu_flag &= ~UT_VFORK;
743 /* restore thread-set-id state */
744 if (uth->uu_flag & UT_WASSETUID) {
745 uth->uu_flag |= UT_SETUID;
746 uth->uu_flag &= ~UT_WASSETUID;
747 }
748 uth->uu_proc = 0;
749 uth->uu_sigmask = uth->uu_vforkmask;
750
751 proc_lock(child_proc);
752 child_proc->p_lflag &= ~P_LINVFORK;
753 child_proc->p_vforkact = 0;
754 proc_unlock(child_proc);
755
756 thread_set_parent(th, rval);
757
758 if (retval) {
759 retval[0] = rval;
760 retval[1] = 0; /* mark parent */
761 }
762 }
763
764
765 /*
766 * fork_create_child
767 *
768 * Description: Common operations associated with the creation of a child
769 * process
770 *
771 * Parameters: parent_task parent task
772 * parent_coalitions parent's set of coalitions
773 * child_proc child process
774 * inherit_memory TRUE, if the parents address space is
775 * to be inherited by the child
776 * is64bit TRUE, if the child being created will
777 * be associated with a 64 bit process
778 * rather than a 32 bit process
779 *
780 * Note: This code is called in the fork() case, from the execve() call
781 * graph, if implementing an execve() following a vfork(), from
782 * the posix_spawn() call graph (which implicitly includes a
783 * vfork() equivalent call), and in the system bootstrap case.
784 *
785 * It creates a new task and thread (and as a side effect of the
786 * thread creation, a uthread) in the parent coalition set, which is
787 * then associated with the process 'child'. If the parent
788 * process address space is to be inherited, then a flag
789 * indicates that the newly created task should inherit this from
790 * the parent task.
791 *
792 * As a special concession to bootstrapping the initial process
793 * in the system, it's possible for 'parent_task' to be TASK_NULL;
794 * in this case, 'inherit_memory' MUST be FALSE.
795 */
796 thread_t
797 fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit)
798 {
799 thread_t child_thread = NULL;
800 task_t child_task;
801 kern_return_t result;
802
803 /* Create a new task for the child process */
804 result = task_create_internal(parent_task,
805 parent_coalitions,
806 inherit_memory,
807 is64bit,
808 &child_task);
809 if (result != KERN_SUCCESS) {
810 printf("%s: task_create_internal failed. Code: %d\n",
811 __func__, result);
812 goto bad;
813 }
814
815 /* Set the child process task to the new task */
816 child_proc->task = child_task;
817
818 /* Set child task process to child proc */
819 set_bsdtask_info(child_task, child_proc);
820
821 /* Propagate CPU limit timer from parent */
822 if (timerisset(&child_proc->p_rlim_cpu))
823 task_vtimer_set(child_task, TASK_VTIMER_RLIM);
824
825 /* Set/clear 64 bit vm_map flag */
826 if (is64bit)
827 vm_map_set_64bit(get_task_map(child_task));
828 else
829 vm_map_set_32bit(get_task_map(child_task));
830
831 /*
832 * Set child process BSD visible scheduler priority if nice value
833 * inherited from parent
834 */
835 if (child_proc->p_nice != 0)
836 resetpriority(child_proc);
837
838 /* Create a new thread for the child process */
839 result = thread_create_with_continuation(child_task, &child_thread, (thread_continue_t)proc_wait_to_return);
840 if (result != KERN_SUCCESS) {
841 printf("%s: thread_create failed. Code: %d\n",
842 __func__, result);
843 task_deallocate(child_task);
845 child_task = NULL;
846 goto bad;
847 }
846
847 /*
848 * Tag thread as being the first thread in its task.
849 */
850 thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);
851
852 bad:
853 thread_yield_internal(1);
854
855 return(child_thread);
856 }
857
858
859 /*
860 * fork
861 *
862 * Description: fork system call.
863 *
864 * Parameters: parent Parent process to fork
865 * uap (void) [unused]
866 * retval Return value
867 *
868 * Returns: 0 Success
869 * EAGAIN Resource unavailable, try again
870 *
871 * Notes: Attempts to create a new child process which inherits state
872 * from the parent process. If successful, the call returns
873 * having created an initially suspended child process with an
874 * extra Mach task and thread reference. Until we resume the
875 * child process, it is not yet running.
877 *
878 * The return information to the child is contained in the
879 * thread state structure of the new child, and does not
880 * become visible to the child through a normal return process,
881 * since it never made the call into the kernel itself in the
882 * first place.
883 *
884 * After resuming the thread, this function returns directly to
885 * the parent process which invoked the fork() system call.
886 *
887 * Important: The child thread_resume occurs before the parent returns;
888 * depending on scheduling latency, this means that it is not
889 * deterministic as to whether the parent or child is scheduled
890 * to run first. It is entirely possible that the child could
891 * run to completion prior to the parent running.
892 */
893 int
894 fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
895 {
896 thread_t child_thread;
897 int err;
898
899 retval[1] = 0; /* flag parent return for user space */
900
901 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
902 task_t child_task;
903 proc_t child_proc;
904
905 /* Return to the parent */
906 child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
907 retval[0] = child_proc->p_pid;
908
909 /*
910 * Drop the signal lock on the child which was taken on our
911 * behalf by forkproc()/cloneproc() to prevent signals being
912 * received by the child in a partially constructed state.
913 */
914 proc_signalend(child_proc, 0);
915 proc_transend(child_proc, 0);
916
917 /* flag the fork has occurred */
918 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
919 DTRACE_PROC1(create, proc_t, child_proc);
920
921 #if CONFIG_DTRACE
922 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL)
923 (*dtrace_proc_waitfor_hook)(child_proc);
924 #endif
925
926 /* "Return" to the child */
927 proc_clear_return_wait(child_proc, child_thread);
928
929 /* drop the extra references we got during the creation */
930 if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
931 task_deallocate(child_task);
932 }
933 thread_deallocate(child_thread);
934 }
935
936 return(err);
937 }
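
/*
 * Editorial note (not part of the original source): the "Important" note
 * above means user-space code must not rely on the parent or the child
 * being scheduled first after fork(). A minimal user-space sketch, assuming
 * a POSIX environment, of synchronizing through waitpid() rather than
 * through scheduling order:
 *
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		// child: may run before or after the parent's next statement
 *		_exit(0);
 *	} else if (pid > 0) {
 *		int status;
 *		(void)waitpid(pid, &status, 0);	// explicit synchronization point
 *		printf("child %d exited\n", (int)pid);
 *	}
 */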
938
939
940 /*
941 * cloneproc
942 *
943 * Description: Create a new process from a specified process.
944 *
945 * Parameters: parent_task The parent task to be cloned, or
946 * TASK_NULL if the task
947 * characteristics are not to be
948 * inherited; in that case the new
949 * task does not inherit the VM
950 * characteristics of the parent
951 * parent_proc The parent process to be cloned
952 * inherit_memory True if the child is to inherit
953 * memory from the parent; if this is
954 * TRUE, then parent_task must not
955 * be TASK_NULL
956 * memstat_internal Whether to track the process in the
957 * jetsam priority list (if configured)
958 *
959 * Returns: !NULL pointer to new child thread
960 * NULL Failure (unspecified)
961 *
962 * Note: On return newly created child process has signal lock held
963 * to block delivery of signal to it if called with lock set.
964 * fork() code needs to explicitly remove this lock before
965 * signals can be delivered
966 *
967 * In the case of bootstrap, this function can be called from
968 * bsd_utaskbootstrap() in order to bootstrap the first process;
969 * the net effect is to provide a uthread structure for the
970 * kernel process associated with the kernel task.
971 *
972 * XXX: Tristating using the value parent_task as the major key
973 * and inherit_memory as the minor key is something we should
974 * refactor later; we owe the current semantics, ultimately,
975 * to the semantics of task_create_internal. For now, we will
976 * live with this being somewhat awkward.
977 */
978 thread_t
979 cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
980 {
981 #if !CONFIG_MEMORYSTATUS
982 #pragma unused(memstat_internal)
983 #endif
984 task_t child_task;
985 proc_t child_proc;
986 thread_t child_thread = NULL;
987
988 if ((child_proc = forkproc(parent_proc)) == NULL) {
989 /* Failed to allocate new process */
990 goto bad;
991 }
992
993 child_thread = fork_create_child(parent_task, parent_coalitions, child_proc, inherit_memory, (parent_task == TASK_NULL) ? FALSE : (parent_proc->p_flag & P_LP64));
994
995 if (child_thread == NULL) {
996 /*
997 * Failed to create thread; now we must deconstruct the new
998 * process previously obtained from forkproc().
999 */
1000 forkproc_free(child_proc);
1001 goto bad;
1002 }
1003
1004 child_task = get_threadtask(child_thread);
1005 if (parent_proc->p_flag & P_LP64) {
1006 task_set_64bit(child_task, TRUE);
1007 OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
1008 } else {
1009 task_set_64bit(child_task, FALSE);
1010 OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
1011 }
1012
1013 #if CONFIG_MEMORYSTATUS
1014 if (memstat_internal) {
1015 proc_list_lock();
1016 child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
1017 proc_list_unlock();
1018 }
1019 #endif
1020
1021 /* make child visible */
1022 pinsertchild(parent_proc, child_proc);
1023
1024 /*
1025 * Make child runnable, set start time.
1026 */
1027 child_proc->p_stat = SRUN;
1028 bad:
1029 return(child_thread);
1030 }
1031
1032
1033 /*
1034 * Destroy a process structure that resulted from a call to forkproc(), but
1035 * which must be returned to the system because of a subsequent failure
1036 * preventing it from becoming active.
1037 *
1038 * Parameters: p The incomplete process from forkproc()
1039 *
1040 * Returns: (void)
1041 *
1042 * Note: This function should only be used in an error handler following
1043 * a call to forkproc().
1044 *
1045 * Operations occur in reverse order of those in forkproc().
1046 */
1047 void
1048 forkproc_free(proc_t p)
1049 {
1050
1051 /* We held signal and a transition locks; drop them */
1052 proc_signalend(p, 0);
1053 proc_transend(p, 0);
1054
1055 /*
1056 * If we have our own copy of the resource limits structure, we
1057 * need to free it. If it's a shared copy, we need to drop our
1058 * reference on it.
1059 */
1060 proc_limitdrop(p, 0);
1061 p->p_limit = NULL;
1062
1063 #if SYSV_SHM
1064 /* Need to drop references to the shared memory segment(s), if any */
1065 if (p->vm_shm) {
1066 /*
1067 * Use shmexec(): we have no address space, so no mappings
1068 *
1069 * XXX Yes, the routine is badly named.
1070 */
1071 shmexec(p);
1072 }
1073 #endif
1074
1075 /* Need to undo the effects of the fdcopy(), if any */
1076 fdfree(p);
1077
1078 /*
1079 * Drop the reference on a text vnode pointer, if any
1080 * XXX This code is broken in forkproc(); see <rdar://4256419>;
1081 * XXX if anyone ever uses this field, we will be extremely unhappy.
1082 */
1083 if (p->p_textvp) {
1084 vnode_rele(p->p_textvp);
1085 p->p_textvp = NULL;
1086 }
1087
1088 /* Stop the profiling clock */
1089 stopprofclock(p);
1090
1091 /* Update the audit session proc count */
1092 AUDIT_SESSION_PROCEXIT(p);
1093
1094 /* Release the credential reference */
1095 kauth_cred_unref(&p->p_ucred);
1096
1097 proc_list_lock();
1098 /* Decrement the count of processes in the system */
1099 nprocs--;
1100 proc_list_unlock();
1101
1102 thread_call_free(p->p_rcall);
1103
1104 /* Free allocated memory */
1105 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
1106 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
1107 proc_checkdeadrefs(p);
1108 FREE_ZONE(p, sizeof *p, M_PROC);
1109 }
1110
1111
1112 /*
1113 * forkproc
1114 *
1115 * Description: Create a new process structure, given a parent process
1116 * structure.
1117 *
1118 * Parameters: parent_proc The parent process
1119 *
1120 * Returns: !NULL The new process structure
1121 * NULL Error (insufficient free memory)
1122 *
1123 * Note: When successful, the newly created process structure is
1124 * partially initialized; if a caller needs to deconstruct the
1125 * returned structure, they must call forkproc_free() to do so.
1126 */
1127 proc_t
1128 forkproc(proc_t parent_proc)
1129 {
1130 proc_t child_proc; /* Our new process */
1131 static int nextpid = 0, pidwrap = 0, nextpidversion = 0;
1132 static uint64_t nextuniqueid = 0;
1133 int error = 0;
1134 struct session *sessp;
1135 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());
1136
1137 MALLOC_ZONE(child_proc, proc_t , sizeof *child_proc, M_PROC, M_WAITOK);
1138 if (child_proc == NULL) {
1139 printf("forkproc: M_PROC zone exhausted\n");
1140 goto bad;
1141 }
1142 /* zero it out as we need to insert in hash */
1143 bzero(child_proc, sizeof *child_proc);
1144
1145 MALLOC_ZONE(child_proc->p_stats, struct pstats *,
1146 sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK);
1147 if (child_proc->p_stats == NULL) {
1148 printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n");
1149 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1150 child_proc = NULL;
1151 goto bad;
1152 }
1153 MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *,
1154 sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK);
1155 if (child_proc->p_sigacts == NULL) {
1156 printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
1157 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1158 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1159 child_proc = NULL;
1160 goto bad;
1161 }
1162
1163 /* allocate a callout for use by interval timers */
1164 child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
1165 if (child_proc->p_rcall == NULL) {
1166 FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
1167 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1168 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1169 child_proc = NULL;
1170 goto bad;
1171 }
1172
1173
1174 /*
1175 * Find an unused PID.
1176 */
1177
1178 proc_list_lock();
1179
1180 nextpid++;
1181 retry:
1182 /*
1183 * If the process ID prototype has wrapped around,
1184 * restart somewhat above 0, as the low-numbered procs
1185 * tend to include daemons that don't exit.
1186 */
1187 if (nextpid >= PID_MAX) {
1188 nextpid = 100;
1189 pidwrap = 1;
1190 }
1191 if (pidwrap != 0) {
1192
1193 /* the pid stays in the hash for both zombie and running states */
1194 if (pfind_locked(nextpid) != PROC_NULL) {
1195 nextpid++;
1196 goto retry;
1197 }
1198
1199 if (pgfind_internal(nextpid) != PGRP_NULL) {
1200 nextpid++;
1201 goto retry;
1202 }
1203 if (session_find_internal(nextpid) != SESSION_NULL) {
1204 nextpid++;
1205 goto retry;
1206 }
1207 }
1208 nprocs++;
1209 child_proc->p_pid = nextpid;
1210 child_proc->p_responsible_pid = nextpid; /* initially responsible for self */
1211 child_proc->p_idversion = nextpidversion++;
1212 /* kernel process is handcrafted and not from fork, so start from 1 */
1213 child_proc->p_uniqueid = ++nextuniqueid;
1214 #if 1
1215 if (child_proc->p_pid != 0) {
1216 if (pfind_locked(child_proc->p_pid) != PROC_NULL)
1217 panic("proc in the list already\n");
1218 }
1219 #endif
1220 /* Insert in the hash */
1221 child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE);
1222 LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
1223 proc_list_unlock();
1224
1225
1226 /*
1227 * We've identified the PID we are going to use; initialize the new
1228 * process structure.
1229 */
1230 child_proc->p_stat = SIDL;
1231 child_proc->p_pgrpid = PGRPID_DEAD;
1232
1233 /*
1234 * The proc structure was zeroed at allocation time, as required for
1235 * insertion into the hash. Copy the section that is to be copied
1236 * directly from the parent.
1237 */
1238 bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
1239 (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy));
1240
1241 /*
1242 * Some flags are inherited from the parent.
1243 * Duplicate sub-structures as needed.
1244 * Increase reference counts on shared objects.
1245 * The p_stats and p_sigacts substructs are set in vm_fork.
1246 */
1247 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
1248 if (parent_proc->p_flag & P_PROFIL)
1249 startprofclock(child_proc);
1250
1251 child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY));
1252
1253 /*
1254 * Note that if the current thread has an assumed identity, this
1255 * credential will be granted to the new process.
1256 */
1257 child_proc->p_ucred = kauth_cred_get_with_ref();
1258 /* update cred on proc */
1259 PROC_UPDATE_CREDS_ONPROC(child_proc);
1260 /* update audit session proc count */
1261 AUDIT_SESSION_PROCNEW(child_proc);
1262
1263 #if CONFIG_FINE_LOCK_GROUPS
1264 lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr);
1265 lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
1266 lck_mtx_init(&child_proc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
1267 #if CONFIG_DTRACE
1268 lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
1269 #endif
1270 lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr);
1271 #else /* !CONFIG_FINE_LOCK_GROUPS */
1272 lck_mtx_init(&child_proc->p_mlock, proc_lck_grp, proc_lck_attr);
1273 lck_mtx_init(&child_proc->p_fdmlock, proc_lck_grp, proc_lck_attr);
1274 lck_mtx_init(&child_proc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
1275 #if CONFIG_DTRACE
1276 lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
1277 #endif
1278 lck_spin_init(&child_proc->p_slock, proc_lck_grp, proc_lck_attr);
1279 #endif /* !CONFIG_FINE_LOCK_GROUPS */
1280 klist_init(&child_proc->p_klist);
1281
1282 if (child_proc->p_textvp != NULLVP) {
1283 /* bump references to the text vnode */
1284 /* Need to hold iocount across the ref call */
1285 if (vnode_getwithref(child_proc->p_textvp) == 0) {
1286 error = vnode_ref(child_proc->p_textvp);
1287 vnode_put(child_proc->p_textvp);
1288 if (error != 0)
1289 child_proc->p_textvp = NULLVP;
1290 }
1291 }
1292
1293 /*
1294 * Copy the parent's per-process open file table to the child; if
1295 * there is a per-thread current working directory, set the child's
1296 * per-process current working directory to that instead of the
1297 * parent's.
1298 *
1299 * XXX may fail to copy descriptors to child
1300 */
1301 child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);
1302
1303 #if SYSV_SHM
1304 if (parent_proc->vm_shm) {
1305 /* XXX may fail to attach shm to child */
1306 (void)shmfork(parent_proc, child_proc);
1307 }
1308 #endif
1309 /*
1310 * inherit the limit structure to the child
1311 */
1312 proc_limitfork(parent_proc, child_proc);
1313
1314 if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1315 uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur;
1316 child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur;
1317 }
1318
1319 /* Initialize new process stats, including start time */
1320 /* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
1321 bzero(child_proc->p_stats, sizeof(*child_proc->p_stats));
1322 microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);
1323
1324 if (parent_proc->p_sigacts != NULL)
1325 (void)memcpy(child_proc->p_sigacts,
1326 parent_proc->p_sigacts, sizeof *child_proc->p_sigacts);
1327 else
1328 (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts);
1329
1330 sessp = proc_session(parent_proc);
1331 if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT)
1332 OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag);
1333 session_rele(sessp);
1334
1335 /*
1336 * Block all signals from reaching the process.
1337 * No transition race should be occurring with the child yet,
1338 * but indicate that the process is in (the creation) transition.
1339 */
1340 proc_signalstart(child_proc, 0);
1341 proc_transstart(child_proc, 0, 0);
1342 proc_set_return_wait(child_proc);
1343
1344 child_proc->p_pcaction = 0;
1345
1346 TAILQ_INIT(&child_proc->p_uthlist);
1347 TAILQ_INIT(&child_proc->p_aio_activeq);
1348 TAILQ_INIT(&child_proc->p_aio_doneq);
1349
1350 /* Inherit the parent flags for code sign */
1351 child_proc->p_csflags = (parent_proc->p_csflags & ~CS_KILLED);
1352
1353 /*
1354 * All processes have work queue locks; cleaned up by
1355 * reap_child_locked()
1356 */
1357 workqueue_init_lock(child_proc);
1358
1359 /*
1360 * Copy work queue information
1361 *
1362 * Note: This should probably only happen in the case where we are
1363 * creating a child that is a copy of the parent; since this
1364 * routine is called in the non-duplication case of vfork()
1365 * or posix_spawn(), this information should likely not
1366 * be duplicated.
1367 *
1368 * <rdar://6640553> Work queue pointers that no longer point to code
1369 */
1370 child_proc->p_wqthread = parent_proc->p_wqthread;
1371 child_proc->p_threadstart = parent_proc->p_threadstart;
1372 child_proc->p_pthsize = parent_proc->p_pthsize;
1373 child_proc->p_targconc = parent_proc->p_targconc;
1374 if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
1375 child_proc->p_lflag |= P_LREGISTER;
1376 }
1377 child_proc->p_wqkqueue = NULL;
1378 child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
1379 child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
1380 #if PSYNCH
1381 pth_proc_hashinit(child_proc);
1382 #endif /* PSYNCH */
1383
1384 #if CONFIG_PERSONAS
1385 child_proc->p_persona = NULL;
1386 error = persona_proc_inherit(child_proc, parent_proc);
1387 if (error != 0) {
1388 printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
1389 forkproc_free(child_proc);
1390 child_proc = NULL;
1391 goto bad;
1392 }
1393 #endif
1394
1395 #if CONFIG_MEMORYSTATUS
1396 /* Memorystatus + jetsam init */
1397 child_proc->p_memstat_state = 0;
1398 child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
1399 child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
1400 child_proc->p_memstat_userdata = 0;
1401 #if CONFIG_FREEZE
1402 child_proc->p_memstat_suspendedfootprint = 0;
1403 #endif
1404 child_proc->p_memstat_dirty = 0;
1405 child_proc->p_memstat_idledeadline = 0;
1406 #endif /* CONFIG_MEMORYSTATUS */
1407
1408 bad:
1409 return(child_proc);
1410 }
1411
1412 void
1413 proc_lock(proc_t p)
1414 {
1415 lck_mtx_assert(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
1416 lck_mtx_lock(&p->p_mlock);
1417 }
1418
1419 void
1420 proc_unlock(proc_t p)
1421 {
1422 lck_mtx_unlock(&p->p_mlock);
1423 }
1424
1425 void
1426 proc_spinlock(proc_t p)
1427 {
1428 lck_spin_lock(&p->p_slock);
1429 }
1430
1431 void
1432 proc_spinunlock(proc_t p)
1433 {
1434 lck_spin_unlock(&p->p_slock);
1435 }
1436
1437 void
1438 proc_list_lock(void)
1439 {
1440 lck_mtx_lock(proc_list_mlock);
1441 }
1442
1443 void
1444 proc_list_unlock(void)
1445 {
1446 lck_mtx_unlock(proc_list_mlock);
1447 }
1448
1449 void
1450 proc_ucred_lock(proc_t p)
1451 {
1452 lck_mtx_lock(&p->p_ucred_mlock);
1453 }
1454
1455 void
1456 proc_ucred_unlock(proc_t p)
1457 {
1458 lck_mtx_unlock(&p->p_ucred_mlock);
1459 }
1460
1461 #include <kern/zalloc.h>
1462
1463 struct zone *uthread_zone;
1464 static int uthread_zone_inited = 0;
1465
1466 static void
1467 uthread_zone_init(void)
1468 {
1469 if (!uthread_zone_inited) {
1470 uthread_zone = zinit(sizeof(struct uthread),
1471 thread_max * sizeof(struct uthread),
1472 THREAD_CHUNK * sizeof(struct uthread),
1473 "uthreads");
1474 uthread_zone_inited = 1;
1475 }
1476 }
1477
1478 void *
1479 uthread_alloc(task_t task, thread_t thread, int noinherit)
1480 {
1481 proc_t p;
1482 uthread_t uth;
1483 uthread_t uth_parent;
1484 void *ut;
1485
1486 if (!uthread_zone_inited)
1487 uthread_zone_init();
1488
1489 ut = (void *)zalloc(uthread_zone);
1490 bzero(ut, sizeof(struct uthread));
1491
1492 p = (proc_t) get_bsdtask_info(task);
1493 uth = (uthread_t)ut;
1494 uth->uu_thread = thread;
1495
1496 /*
1497 * Thread inherits credential from the creating thread, if both
1498 * are in the same task.
1499 *
1500 * If the creating thread has no credential or is from another
1501 * task we can leave the new thread credential NULL. If it needs
1502 * one later, it will be lazily assigned from the task's process.
1503 */
1504 uth_parent = (uthread_t)get_bsdthread_info(current_thread());
1505 if ((noinherit == 0) && task == current_task() &&
1506 uth_parent != NULL &&
1507 IS_VALID_CRED(uth_parent->uu_ucred)) {
1508 /*
1509 * XXX The new thread is, in theory, being created in context
1510 * XXX of parent thread, so a direct reference to the parent
1511 * XXX is OK.
1512 */
1513 kauth_cred_ref(uth_parent->uu_ucred);
1514 uth->uu_ucred = uth_parent->uu_ucred;
1515 /* the credential we just inherited is an assumed credential */
1516 if (uth_parent->uu_flag & UT_SETUID)
1517 uth->uu_flag |= UT_SETUID;
1518 } else {
1519 /* sometimes workqueue threads are created outside of task context */
1520 if ((task != kernel_task) && (p != PROC_NULL))
1521 uth->uu_ucred = kauth_cred_proc_ref(p);
1522 else
1523 uth->uu_ucred = NOCRED;
1524 }
1525
1526
1527 if ((task != kernel_task) && p) {
1528
1529 proc_lock(p);
1530 if (noinherit != 0) {
1531 /* workq threads will not inherit masks */
1532 uth->uu_sigmask = ~workq_threadmask;
1533 } else if (uth_parent) {
1534 if (uth_parent->uu_flag & UT_SAS_OLDMASK)
1535 uth->uu_sigmask = uth_parent->uu_oldmask;
1536 else
1537 uth->uu_sigmask = uth_parent->uu_sigmask;
1538 }
1539 uth->uu_context.vc_thread = thread;
1540 TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
1541 proc_unlock(p);
1542
1543 #if CONFIG_DTRACE
1544 if (p->p_dtrace_ptss_pages != NULL) {
1545 uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
1546 }
1547 #endif
1548 }
1549
1550 return (ut);
1551 }
1552
1553 /*
1554 * This routine frees the thread name field of the uthread_t structure. Split out of
1555 * uthread_cleanup() so it can be called separately on the threads of a corpse after
1556 * the corpse notification has been sent, and the handler has had a chance to extract
1557 * the thread names.
1558 */
1559 void
1560 uthread_cleanup_name(void *uthread)
1561 {
1562 uthread_t uth = (uthread_t)uthread;
1563
1564 /*
1565 * <rdar://17834538>
1566 * Set pth_name to NULL before calling free().
1567 * Previously there was a race condition in the
1568 * case this code was executing during a stackshot
1569 * where the stackshot could try and copy pth_name
1570 * after it had been freed and before it was marked
1571 * as null.
1572 */
1573 if (uth->pth_name != NULL) {
1574 void *pth_name = uth->pth_name;
1575 uth->pth_name = NULL;
1576 kfree(pth_name, MAXTHREADNAMESIZE);
1577 }
1578 return;
1579 }
1580
1581 /*
1582 * This routine frees all the BSD context in uthread except the credential.
1583 * It does not free the uthread structure itself.
1584 */
1585 void
1586 uthread_cleanup(task_t task, void *uthread, void * bsd_info, boolean_t is_corpse)
1587 {
1588 struct _select *sel;
1589 uthread_t uth = (uthread_t)uthread;
1590 proc_t p = (proc_t)bsd_info;
1591
1592 #if PROC_REF_DEBUG
1593 if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
1594 panic("uthread_cleanup called for uthread %p with uu_proc_refcount != 0", uthread);
1595 }
1596 #endif
1597
1598 if (uth->uu_lowpri_window || uth->uu_throttle_info) {
1599 /*
1600 * task is marked as a low priority I/O type
1601 * and we've somehow managed to not dismiss the throttle
1602 * through the normal exit paths back to user space...
1603 * no need to throttle this thread since it's going away,
1604 * but we do need to update our bookkeeping w.r.t. throttled threads
1605 *
1606 * Calling this routine will clean up any throttle info reference
1607 * still in use by the thread.
1608 */
1609 throttle_lowpri_io(0);
1610 }
1611 /*
1612 * Per-thread audit state should never last beyond system
1613 * call return. Since we don't audit the thread creation/
1614 * removal, the thread state pointer should never be
1615 * non-NULL when we get here.
1616 */
1617 assert(uth->uu_ar == NULL);
1618
1619 sel = &uth->uu_select;
1620 /* cleanup the select bit space */
1621 if (sel->nbytes) {
1622 FREE(sel->ibits, M_TEMP);
1623 FREE(sel->obits, M_TEMP);
1624 sel->nbytes = 0;
1625 }
1626
1627 if (uth->uu_cdir) {
1628 vnode_rele(uth->uu_cdir);
1629 uth->uu_cdir = NULLVP;
1630 }
1631
1632 if (uth->uu_wqset) {
1633 if (waitq_set_is_valid(uth->uu_wqset))
1634 waitq_set_deinit(uth->uu_wqset);
1635 FREE(uth->uu_wqset, M_SELECT);
1636 uth->uu_wqset = NULL;
1637 uth->uu_wqstate_sz = 0;
1638 }
1639
1640 /*
1641 * defer the removal of the thread name on process corpses until the corpse has
1642 * been autopsied.
1643 */
1644 if (!is_corpse) {
1645 uthread_cleanup_name(uth);
1646 }
1647
1648 if ((task != kernel_task) && p) {
1649
1650 if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
1651 vfork_exit_internal(uth->uu_proc, 0, 1);
1652 }
1653 /*
1654 * Remove the thread from the process list and
1655 * transfer [appropriate] pending signals to the process.
1656 */
1657 if (get_bsdtask_info(task) == p) {
1658 proc_lock(p);
1659 TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
1660 p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
1661 proc_unlock(p);
1662 }
1663 #if CONFIG_DTRACE
1664 struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
1665 uth->t_dtrace_scratch = NULL;
1666 if (tmpptr != NULL) {
1667 dtrace_ptss_release_entry(p, tmpptr);
1668 }
1669 #endif
1670 }
1671 }
1672
1673 /* This routine releases the credential stored in uthread */
1674 void
1675 uthread_cred_free(void *uthread)
1676 {
1677 uthread_t uth = (uthread_t)uthread;
1678
1679 /* drop the credential reference, if any */
1680 if (IS_VALID_CRED(uth->uu_ucred)) {
1681 kauth_cred_t oldcred = uth->uu_ucred;
1682 uth->uu_ucred = NOCRED;
1683 kauth_cred_unref(&oldcred);
1684 }
1685 }
1686
1687 /* This routine frees the uthread structure held in thread structure */
1688 void
1689 uthread_zone_free(void *uthread)
1690 {
1691 uthread_t uth = (uthread_t)uthread;
1692
1693 if (uth->t_tombstone) {
1694 kfree(uth->t_tombstone, sizeof(struct doc_tombstone));
1695 uth->t_tombstone = NULL;
1696 }
1697
1698 /* and free the uthread itself */
1699 zfree(uthread_zone, uthread);
1700 }