apple/xnu.git (xnu-7195.101.1) - bsd/kern/kern_fork.c
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74 /*
75 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
76 * support for mandatory and extensible security protections. This notice
77 * is included in support of clause 2.2 (b) of the Apple Public License,
78 * Version 2.0.
79 */
80
81 #include <kern/assert.h>
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/filedesc.h>
85 #include <sys/kernel.h>
86 #include <sys/malloc.h>
87 #include <sys/proc_internal.h>
88 #include <sys/kauth.h>
89 #include <sys/user.h>
90 #include <sys/reason.h>
91 #include <sys/resourcevar.h>
92 #include <sys/vnode_internal.h>
93 #include <sys/file_internal.h>
94 #include <sys/acct.h>
95 #include <sys/codesign.h>
96 #include <sys/sysproto.h>
97 #if CONFIG_PERSONAS
98 #include <sys/persona.h>
99 #endif
100 #include <sys/doc_tombstone.h>
101 #if CONFIG_DTRACE
102 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
103 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
104 extern void dtrace_proc_fork(proc_t, proc_t, int);
105
106 /*
107 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
108 * we will store its value before actually calling it.
109 */
110 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
111
112 #include <sys/dtrace_ptss.h>
113 #endif
114
115 #include <security/audit/audit.h>
116
117 #include <mach/mach_types.h>
118 #include <kern/coalition.h>
119 #include <kern/kern_types.h>
120 #include <kern/kalloc.h>
121 #include <kern/mach_param.h>
122 #include <kern/task.h>
123 #include <kern/thread.h>
124 #include <kern/thread_call.h>
125 #include <kern/zalloc.h>
126
127 #include <os/log.h>
128
129 #if CONFIG_MACF
130 #include <security/mac_framework.h>
131 #include <security/mac_mach_internal.h>
132 #endif
133
134 #include <vm/vm_map.h>
135 #include <vm/vm_protos.h>
136 #include <vm/vm_shared_region.h>
137
138 #include <sys/shm_internal.h> /* for shmfork() */
139 #include <mach/task.h> /* for thread_create() */
140 #include <mach/thread_act.h> /* for thread_resume() */
141
142 #include <sys/sdt.h>
143
144 #if CONFIG_MEMORYSTATUS
145 #include <sys/kern_memorystatus.h>
146 #endif
147
148 /* XXX routines which should have Mach prototypes, but don't */
149 void thread_set_parent(thread_t parent, int pid);
150 extern void act_thread_catt(void *ctx);
151 void thread_set_child(thread_t child, int pid);
152 void *act_thread_csave(void);
153 extern boolean_t task_is_exec_copy(task_t);
154 int nextpidversion = 0;
155
156
157 thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
158 proc_t forkproc(proc_t);
159 void forkproc_free(proc_t);
160 thread_t fork_create_child(task_t parent_task,
161 coalition_t *parent_coalitions,
162 proc_t child,
163 int inherit_memory,
164 int is_64bit_addr,
165 int is_64bit_data,
166 int in_exec);
167 void proc_vfork_begin(proc_t parent_proc);
168 void proc_vfork_end(proc_t parent_proc);
169
170 static LCK_GRP_DECLARE(rethrottle_lock_grp, "rethrottle");
171 static ZONE_DECLARE(uthread_zone, "uthreads",
172 sizeof(struct uthread), ZC_ZFREE_CLEARMEM);
173
174 SECURITY_READ_ONLY_LATE(zone_t) proc_zone;
175 ZONE_INIT(&proc_zone, "proc", sizeof(struct proc), ZC_ZFREE_CLEARMEM,
176 ZONE_ID_PROC, NULL);
177
178 ZONE_DECLARE(proc_stats_zone, "pstats",
179 sizeof(struct pstats), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
180
181 ZONE_DECLARE(proc_sigacts_zone, "sigacts",
182 sizeof(struct sigacts), ZC_NOENCRYPT);
183
184 #define DOFORK 0x1 /* fork() system call */
185 #define DOVFORK 0x2 /* vfork() system call */
186
187 /*
188 * proc_vfork_begin
189 *
190 * Description: start a vfork on a process
191 *
192 * Parameters: parent_proc process (re)entering vfork state
193 *
194 * Returns: (void)
195 *
196 * Notes: Although this function increments a count, a count in
197 * excess of 1 is not currently supported. According to the
198 * POSIX standard, calling anything other than execve() or
199 * _exit() following a vfork(), including calling vfork()
200 * itself again, will result in undefined behaviour
201 */
202 void
203 proc_vfork_begin(proc_t parent_proc)
204 {
205 proc_lock(parent_proc);
206 parent_proc->p_lflag |= P_LVFORK;
207 parent_proc->p_vforkcnt++;
208 proc_unlock(parent_proc);
209 }
210
211 /*
212 * proc_vfork_end
213 *
214 * Description: stop a vfork on a process
215 *
216 * Parameters: parent_proc process leaving vfork state
217 *
218 * Returns: (void)
219 *
220 * Notes: Decrements the count; currently, reentrancy of vfork()
221 * is unsupported on the current process
222 */
223 void
224 proc_vfork_end(proc_t parent_proc)
225 {
226 proc_lock(parent_proc);
227 parent_proc->p_vforkcnt--;
228 if (parent_proc->p_vforkcnt < 0) {
229 panic("vfork cnt is -ve");
230 }
231 if (parent_proc->p_vforkcnt == 0) {
232 parent_proc->p_lflag &= ~P_LVFORK;
233 }
234 proc_unlock(parent_proc);
235 }
236
237
238 /*
239 * vfork
240 *
241 * Description: vfork system call
242 *
243 * Parameters: void [no arguments]
244 *
245 * Retval: 0 (to child process)
246 * !0 pid of child (to parent process)
247 * -1 error (see "Returns:")
248 *
249 * Returns: EAGAIN Administrative limit reached
250 * EINVAL vfork() called during vfork()
251 * ENOMEM Failed to allocate new process
252 *
253 * Note: After a successful call to this function, the parent process
254 * has its task, thread, and uthread lent to the child process,
255 * and control is returned to the caller; if this function is
256 * invoked as a system call, the return is to user space, and
257 * is effectively running on the child process.
258 *
259 * Subsequent calls that operate on process state are permitted,
260 * though discouraged, and will operate on the child process; any
261 * operations on the task, thread, or uthread will result in
262 * changes in the parent state and, if inheritable, will also be
263 * reflected in the child state once a task, thread, and uthread
264 * are realized for the child process at execve() time. Given
265 * this, it's recommended that people use the posix_spawn() call
266 * instead.
267 *
268 * BLOCK DIAGRAM OF VFORK
269 *
270 * Before:
271 *
272 * ,----------------. ,-------------.
273 * | | task | |
274 * | parent_thread | ------> | parent_task |
275 * | | <.list. | |
276 * `----------------' `-------------'
277 * uthread | ^ bsd_info | ^
278 * v | vc_thread v | task
279 * ,----------------. ,-------------.
280 * | | | |
281 * | parent_uthread | <.list. | parent_proc | <-- current_proc()
282 * | | | |
283 * `----------------' `-------------'
284 * uu_proc |
285 * v
286 * NULL
287 *
288 * After:
289 *
290 * ,----------------. ,-------------.
291 * | | task | |
292 * ,----> | parent_thread | ------> | parent_task |
293 * | | | <.list. | |
294 * | `----------------' `-------------'
295 * | uthread | ^ bsd_info | ^
296 * | v | vc_thread v | task
297 * | ,----------------. ,-------------.
298 * | | | | |
299 * | | parent_uthread | <.list. | parent_proc |
300 * | | | | |
301 * | `----------------' `-------------'
302 * | uu_proc | . list
303 * | v v
304 * | ,----------------.
305 * `----- | |
306 * p_vforkact | child_proc | <-- current_proc()
307 * | |
308 * `----------------'
309 */
310 int
311 vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval)
312 {
313 thread_t child_thread;
314 int err;
315
316 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_VFORK, NULL)) != 0) {
317 retval[1] = 0;
318 } else {
319 uthread_t ut = get_bsdthread_info(current_thread());
320 proc_t child_proc = ut->uu_proc;
321
322 retval[0] = child_proc->p_pid;
323 retval[1] = 1; /* flag child return for user space */
324
325 /*
326 * Drop the signal lock on the child which was taken on our
327 * behalf by forkproc()/cloneproc() to prevent signals being
328 * received by the child in a partially constructed state.
329 */
330 proc_signalend(child_proc, 0);
331 proc_transend(child_proc, 0);
332
333 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
334 DTRACE_PROC1(create, proc_t, child_proc);
335 ut->uu_flag &= ~UT_VFORKING;
336 }
337
338 return err;
339 }
340
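/*
 * Illustrative sketch (editor's addition, not part of xnu): the userland
 * discipline implied by the notes and diagram above.  Because the child
 * borrows the parent's task/thread/uthread, the only safe calls in the
 * child between vfork() and the point where the parent resumes are an
 * execve() variant and _exit().  Minimal, hypothetical example; wrapped
 * in #if 0 so it is not compiled as part of this file.
 */
#if 0
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = vfork();

	if (pid == 0) {
		/* Child: runs on the borrowed parent thread; exec or exit only. */
		execl("/bin/echo", "echo", "hello", (char *)NULL);
		_exit(127);	/* reached only if the exec failed */
	} else if (pid > 0) {
		/* Parent: resumes only after the child has exec'ed or exited. */
		int status;
		(void)waitpid(pid, &status, 0);
	}
	return 0;
}
#endif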
341
342 /*
343 * fork1
344 *
345 * Description: common code used by all new process creation other than the
346 * bootstrap of the initial process on the system
347 *
348 * Parameters: parent_proc parent process of the process being
349 * child_threadp pointer to location to receive the
350 * Mach thread_t of the child process
351 * created
352 * kind kind of creation being requested
353 * coalitions if spawn, the set of coalitions the
354 * child process should join, or NULL to
355 * inherit the parent's. On non-spawns,
356 * this param is ignored and the child
357 * always inherits the parent's
358 * coalitions.
359 *
360 * Notes: Permissible values for 'kind':
361 *
362 * PROC_CREATE_FORK Create a complete process which will
363 * return actively running in both the
364 * parent and the child; the child copies
365 * the parent address space.
366 * PROC_CREATE_SPAWN Create a complete process which will
367 * return actively running in the parent
368 * only after returning actively running
369 * in the child; the child address space
370 * is newly created by an image activator,
371 * after which the child is run.
372 * PROC_CREATE_VFORK Creates a partial process which will
373 * borrow the parent task, thread, and
374 * uthread to return running in the child;
375 * the child address space and other parts
376 * are lazily created at execve() time, or
377 * the child is terminated, and the parent
378 * does not actively run until that
379 * happens.
380 *
381 * At first it may seem strange that we return the child thread
382 * address rather than process structure, since the process is
383 * the only part guaranteed to be "new"; however, since we do
384 * not actually adjust other references between Mach and BSD (see
385 * the block diagram above the implementation of vfork()), this
386 * is the only method which guarantees us the ability to get
387 * back to the other information.
388 */
389 int
390 fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
391 {
392 thread_t parent_thread = (thread_t)current_thread();
393 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread);
394 proc_t child_proc = NULL; /* set in switch, but compiler... */
395 thread_t child_thread = NULL;
396 uid_t uid;
397 size_t count;
398 int err = 0;
399 int spawn = 0;
400 rlim_t rlimit_nproc_cur;
401
402 /*
403 * Although process entries are dynamically created, we still keep
404 * a global limit on the maximum number we will create. Don't allow
405 * a nonprivileged user to use the last process; don't let root
406 * exceed the limit. The variable nprocs is the current number of
407 * processes, maxproc is the limit.
408 */
409 uid = kauth_getruid();
410 proc_list_lock();
411 if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
412 #if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
413 /*
414 * On the development kernel, panic so that the fact that we hit
415 * the process limit is obvious, as this may very well wedge the
416 * system.
417 */
418 panic("The process table is full; parent pid=%d", parent_proc->p_pid);
419 #endif
420 proc_list_unlock();
421 tablefull("proc");
422 return EAGAIN;
423 }
424 proc_list_unlock();
425
426 /*
427 * Increment the count of procs running with this uid. Don't allow
428 * a nonprivileged user to exceed their current limit, which is
429 * always less than what an rlim_t can hold.
430 * (locking protection is provided by list lock held in chgproccnt)
431 */
432 count = chgproccnt(uid, 1);
433 rlimit_nproc_cur = proc_limitgetcur(parent_proc, RLIMIT_NPROC, TRUE);
434 if (uid != 0 &&
435 (rlim_t)count > rlimit_nproc_cur) {
436 #if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
437 /*
438 * On the development kernel, panic so that the fact that we hit
439 * the per user process limit is obvious. This may be less dire
440 * than hitting the global process limit, but we cannot rely on
441 * that.
442 */
443 panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid);
444 #endif
445 err = EAGAIN;
446 goto bad;
447 }
448
449 #if CONFIG_MACF
450 /*
451 * Determine if MAC policies applied to the process will allow
452 * it to fork. This is an advisory-only check.
453 */
454 err = mac_proc_check_fork(parent_proc);
455 if (err != 0) {
456 goto bad;
457 }
458 #endif
459
460 switch (kind) {
461 case PROC_CREATE_VFORK:
462 /*
463 * Prevent a vfork while we are in vfork(); we should
464 * likely also prevent a fork here as well, and this
465 * check should then be outside the switch statement,
466 * since the proc struct contents will copy from the
467 * child and the task/thread/uthread from the parent in
468 * that case. We do not support vfork() in vfork()
469 * because we don't have to; the same non-requirement
470 * is true of both fork() and posix_spawn() and any
471 * call other than execve() and _exit(), but we've
472 * been historically lenient, so we continue to be so
473 * (for now).
474 *
475 * <rdar://6640521> Probably a source of random panics
476 */
477 if (parent_uthread->uu_flag & UT_VFORK) {
478 printf("fork1 called within vfork by %s\n", parent_proc->p_comm);
479 err = EINVAL;
480 goto bad;
481 }
482
483 /*
484 * Flag us in progress; if we chose to support vfork() in
485 * vfork(), we would chain our parent at this point (in
486 * effect, a stack push). We don't, since we actually want
487 * to disallow everything not specified in the standard
488 */
489 proc_vfork_begin(parent_proc);
490
491 /* The newly created process comes with signal lock held */
492 if ((child_proc = forkproc(parent_proc)) == NULL) {
493 /* Failed to allocate new process */
494 proc_vfork_end(parent_proc);
495 err = ENOMEM;
496 goto bad;
497 }
498
499 // XXX BEGIN: wants to move to be common code (and safe)
500 #if CONFIG_MACF
501 /*
502 * allow policies to associate the credential/label that
503 * we referenced from the parent ... with the child
504 * JMM - this really isn't safe, as we can drop that
505 * association without informing the policy in other
506 * situations (keep long enough to get policies changed)
507 */
508 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
509 #endif
510
511 /*
512 * Propagate change of PID - may get new cred if auditing.
513 *
514 * NOTE: This has no effect in the vfork case, since
515 * child_proc->task != current_task(), but we duplicate it
516 * because this is probably, ultimately, wrong, since we
517 * will be running in the "child" which is the parent task
518 * with the wrong token until we get to the execve() or
519 * _exit() call; a lot of "undefined" can happen before
520 * that.
521 *
522 * <rdar://6640530> disallow everything but execve()/_exit()?
523 */
524 set_security_token(child_proc);
525
526 AUDIT_ARG(pid, child_proc->p_pid);
527
528 // XXX END: wants to move to be common code (and safe)
529
530 /*
531 * BORROW PARENT TASK, THREAD, UTHREAD FOR CHILD
532 *
533 * Note: this is where we would "push" state instead of setting
534 * it for nested vfork() support (see proc_vfork_end() for
535 * description of issues here).
536 */
537 child_proc->task = parent_proc->task;
538
539 child_proc->p_lflag |= P_LINVFORK;
540 child_proc->p_vforkact = parent_thread;
541 child_proc->p_stat = SRUN;
542
543 /*
544 * Until UT_VFORKING is cleared at the end of the vfork
545 * syscall, the process identity of this thread is slightly
546 * murky.
547 *
548 * As long as UT_VFORK and its associated field (uu_proc)
549 * is set, current_proc() will always return the child process.
550 *
551 * However dtrace_proc_selfpid() returns the parent pid to
552 * ensure that e.g. the proc:::create probe actions accrue
553 * to the parent. (Otherwise the child magically seems to
554 * have created itself!)
555 */
556 parent_uthread->uu_flag |= UT_VFORK | UT_VFORKING;
557 parent_uthread->uu_proc = child_proc;
558 parent_uthread->uu_userstate = (void *)act_thread_csave();
559 parent_uthread->uu_vforkmask = parent_uthread->uu_sigmask;
560
561 /* temporarily drop thread-set-id state */
562 if (parent_uthread->uu_flag & UT_SETUID) {
563 parent_uthread->uu_flag |= UT_WASSETUID;
564 parent_uthread->uu_flag &= ~UT_SETUID;
565 }
566
567 /* blow thread state information */
568 /* XXX is this actually necessary, given syscall return? */
569 thread_set_child(parent_thread, child_proc->p_pid);
570
571 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
572
573 /*
574 * Preserve synchronization semantics of vfork. If
575 * waiting for child to exec or exit, set P_PPWAIT
576 * on child, and sleep on our proc (in case of exit).
577 */
578 child_proc->p_lflag |= P_LPPWAIT;
579 pinsertchild(parent_proc, child_proc); /* set visible */
580
581 break;
582
583 case PROC_CREATE_SPAWN:
584 /*
585 * A spawned process differs from a forked process in that
586 * the spawned process does not carry around the parent's
587 * baggage with regard to address space copying, dtrace,
588 * and so on.
589 */
590 spawn = 1;
591
592 OS_FALLTHROUGH;
593
594 case PROC_CREATE_FORK:
595 /*
596 * When we clone the parent process, we are going to inherit
597 * its task attributes and memory, since when we fork, we
598 * will, in effect, create a duplicate of it, with only minor
599 * differences. By contrast, spawned processes do not inherit.
600 */
601 if ((child_thread = cloneproc(parent_proc->task,
602 spawn ? coalitions : NULL,
603 parent_proc,
604 spawn ? FALSE : TRUE,
605 FALSE)) == NULL) {
606 /* Failed to create thread */
607 err = EAGAIN;
608 goto bad;
609 }
610
611 /* copy current thread state into the child thread (only for fork) */
612 if (!spawn) {
613 thread_dup(child_thread);
614 }
615
616 /* child_proc = child_thread->task->proc; */
617 child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));
618
619 // XXX BEGIN: wants to move to be common code (and safe)
620 #if CONFIG_MACF
621 /*
622 * allow policies to associate the credential/label that
623 * we referenced from the parent ... with the child
624 * JMM - this really isn't safe, as we can drop that
625 * association without informing the policy in other
626 * situations (keep long enough to get policies changed)
627 */
628 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
629 #endif
630
631 /*
632 * Propagate change of PID - may get new cred if auditing.
633 *
634 * NOTE: This has no effect in the vfork case, since
635 * child_proc->task != current_task(), but we duplicate it
636 * because this is probably, ultimately, wrong, since we
637 * will be running in the "child" which is the parent task
638 * with the wrong token until we get to the execve() or
639 * _exit() call; a lot of "undefined" can happen before
640 * that.
641 *
642 * <rdar://6640530> disallow everything but execve()/_exit()?
643 */
644 set_security_token(child_proc);
645
646 AUDIT_ARG(pid, child_proc->p_pid);
647
648 // XXX END: wants to move to be common code (and safe)
649
650 /*
651 * Blow thread state information; this is what gives the child
652 * process its "return" value from a fork() call.
653 *
654 * Note: this should probably move to fork() proper, since it
655 * is not relevant to spawn, and the value won't matter
656 * until we resume the child there. If you are in here
657 * refactoring code, consider doing this at the same time.
658 */
659 thread_set_child(child_thread, child_proc->p_pid);
660
661 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
662
663 #if CONFIG_DTRACE
664 dtrace_proc_fork(parent_proc, child_proc, spawn);
665 #endif /* CONFIG_DTRACE */
666 if (!spawn) {
667 /*
668 * Of note, we need to initialize the bank context behind
669 * the protection of the proc_trans lock to prevent a race with exit.
670 */
671 task_bank_init(get_threadtask(child_thread));
672 }
673
674 break;
675
676 default:
677 panic("fork1 called with unknown kind %d", kind);
678 break;
679 }
680
681
682 /* return the thread pointer to the caller */
683 *child_threadp = child_thread;
684
685 bad:
686 /*
687 * In the error case, we return a 0 value for the returned pid (but
688 * it is ignored in the trampoline due to the error return); this
689 * is probably not necessary.
690 */
691 if (err) {
692 (void)chgproccnt(uid, -1);
693 }
694
695 return err;
696 }
697
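/*
 * Illustrative sketch (editor's addition, not part of xnu): how an
 * in-kernel caller consumes fork1().  As the notes above explain, fork1()
 * hands back the child's Mach thread rather than its proc structure; the
 * proc is recovered by walking thread -> task -> bsd_info.  The function
 * name below is hypothetical, and fork() later in this file is the real
 * example of the full pattern.  Wrapped in #if 0 so it is not compiled.
 */
#if 0
static int
example_fork1_caller(proc_t parent_proc)
{
	thread_t child_thread = NULL;
	int err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL);

	if (err != 0) {
		return err;
	}

	/* Walk back from the returned thread to the new child proc. */
	proc_t child_proc = (proc_t)get_bsdtask_info(get_threadtask(child_thread));

	/*
	 * At this point the child still holds the signal/transition locks and
	 * is parked in task_wait_to_return(); see fork() below for the full
	 * unwind (proc_signalend/proc_transend, task_clear_return_wait, and
	 * dropping the extra task/thread references).
	 */
	return child_proc->p_pid;
}
#endif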
698
699 /*
700 * vfork_return
701 *
702 * Description: "Return" to the parent vfork thread following execve/_exit;
703 * this is done by reassociating the parent process structure
704 * with the task, thread, and uthread.
705 *
706 * Refer to the ASCII art above vfork() to figure out the
707 * state we're undoing.
708 *
709 * Parameters: child_proc Child process
710 * retval System call return value array
711 * rval Return value to present to parent
712 *
713 * Returns: void
714 *
715 * Notes: The caller resumes or exits the parent, as appropriate, after
716 * calling this function.
717 */
718 void
719 vfork_return(proc_t child_proc, int32_t *retval, int rval)
720 {
721 task_t parent_task = get_threadtask(child_proc->p_vforkact);
722 proc_t parent_proc = get_bsdtask_info(parent_task);
723 thread_t th = current_thread();
724 uthread_t uth = get_bsdthread_info(th);
725
726 act_thread_catt(uth->uu_userstate);
727
728 /* clear vfork state in parent proc structure */
729 proc_vfork_end(parent_proc);
730
731 /* REPATRIATE PARENT TASK, THREAD, UTHREAD */
732 uth->uu_userstate = 0;
733 uth->uu_flag &= ~UT_VFORK;
734 /* restore thread-set-id state */
735 if (uth->uu_flag & UT_WASSETUID) {
736 uth->uu_flag |= UT_SETUID;
737 uth->uu_flag &= ~UT_WASSETUID;
738 }
739 uth->uu_proc = 0;
740 uth->uu_sigmask = uth->uu_vforkmask;
741
742 proc_lock(child_proc);
743 child_proc->p_lflag &= ~P_LINVFORK;
744 child_proc->p_vforkact = 0;
745 proc_unlock(child_proc);
746
747 thread_set_parent(th, rval);
748
749 if (retval) {
750 retval[0] = rval;
751 retval[1] = 0; /* mark parent */
752 }
753 }
754
755
756 /*
757 * fork_create_child
758 *
759 * Description: Common operations associated with the creation of a child
760 * process
761 *
762 * Parameters: parent_task parent task
763 * parent_coalitions parent's set of coalitions
764 * child_proc child process
765 * inherit_memory TRUE, if the parent's address space is
766 * to be inherited by the child
767 * is_64bit_addr TRUE, if the child being created will
768 * be associated with a 64 bit address space
769 * is_64bit_data TRUE if the child being created will use a
770 * 64-bit register state
771 * in_exec TRUE, if called from execve or posix spawn set exec
772 * FALSE, if called from fork or vfexec
773 *
774 * Note: This code is called in the fork() case, from the execve() call
775 * graph, if implementing an execve() following a vfork(), from
776 * the posix_spawn() call graph (which implicitly includes a
777 * vfork() equivalent call), and in the system bootstrap case.
778 *
779 * It creates a new task and thread (and as a side effect of the
780 * thread creation, a uthread) in the parent coalition set, which is
781 * then associated with the process 'child'. If the parent
782 * process address space is to be inherited, then a flag
783 * indicates that the newly created task should inherit this from
784 * the parent task.
785 *
786 * As a special concession to bootstrapping the initial process
787 * in the system, it's possible for 'parent_task' to be TASK_NULL;
788 * in this case, 'inherit_memory' MUST be FALSE.
789 */
790 thread_t
791 fork_create_child(task_t parent_task,
792 coalition_t *parent_coalitions,
793 proc_t child_proc,
794 int inherit_memory,
795 int is_64bit_addr,
796 int is_64bit_data,
797 int in_exec)
798 {
799 thread_t child_thread = NULL;
800 task_t child_task;
801 kern_return_t result;
802
803 /* Create a new task for the child process */
804 result = task_create_internal(parent_task,
805 parent_coalitions,
806 inherit_memory,
807 is_64bit_addr,
808 is_64bit_data,
809 TF_NONE,
810 in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */
811 (TRW_LRETURNWAIT | TRW_LRETURNWAITER), /* All created threads will wait in task_wait_to_return */
812 &child_task);
813 if (result != KERN_SUCCESS) {
814 printf("%s: task_create_internal failed. Code: %d\n",
815 __func__, result);
816 goto bad;
817 }
818
819 if (!in_exec) {
820 /*
821 * Set the child process task to the new task if not in exec;
822 * the exec case will set the task in proc_exec_switch_task() after image activation.
823 */
824 child_proc->task = child_task;
825 }
826
827 /* Set child task process to child proc */
828 set_bsdtask_info(child_task, child_proc);
829
830 /* Propagate CPU limit timer from parent */
831 if (timerisset(&child_proc->p_rlim_cpu)) {
832 task_vtimer_set(child_task, TASK_VTIMER_RLIM);
833 }
834
835 /*
836 * Set child process BSD visible scheduler priority if nice value
837 * inherited from parent
838 */
839 if (child_proc->p_nice != 0) {
840 resetpriority(child_proc);
841 }
842
843 /*
844 * Create a new thread for the child process. Pin it and make it immovable.
845 * The new thread is waiting on the event triggered by 'task_clear_return_wait'
846 */
847 result = thread_create_waiting(child_task,
848 (thread_continue_t)task_wait_to_return,
849 task_get_return_wait_event(child_task),
850 TH_CREATE_WAITING_OPTION_PINNED | TH_CREATE_WAITING_OPTION_IMMOVABLE,
851 &child_thread);
852
853 if (result != KERN_SUCCESS) {
854 printf("%s: thread_create failed. Code: %d\n",
855 __func__, result);
856 task_deallocate(child_task);
857 child_task = NULL;
goto bad; /* do not fall through and tag a NULL thread */
858 }
859
860 /*
861 * Tag thread as being the first thread in its task.
862 */
863 thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);
864
865 bad:
866 thread_yield_internal(1);
867
868 return child_thread;
869 }
870
871
872 /*
873 * fork
874 *
875 * Description: fork system call.
876 *
877 * Parameters: parent Parent process to fork
878 * uap (void) [unused]
879 * retval Return value
880 *
881 * Returns: 0 Success
882 * EAGAIN Resource unavailable, try again
883 *
884 * Notes: Attempts to create a new child process which inherits state
885 * from the parent process. If successful, the call returns
886 * having created a child process with an extra Mach task and
887 * thread reference, whose initial thread is created suspended.
888 * Until we resume the child process, it is not yet
889 * running.
890 *
891 * The return information to the child is contained in the
892 * thread state structure of the new child, and does not
893 * become visible to the child through a normal return process,
894 * since it never made the call into the kernel itself in the
895 * first place.
896 *
897 * After resuming the thread, this function returns directly to
898 * the parent process which invoked the fork() system call.
899 *
900 * Important: The child thread_resume occurs before the parent returns;
901 * depending on scheduling latency, this means that it is not
902 * deterministic as to whether the parent or child is scheduled
903 * to run first. It is entirely possible that the child could
904 * run to completion prior to the parent running.
905 */
906 int
907 fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
908 {
909 thread_t child_thread;
910 int err;
911
912 retval[1] = 0; /* flag parent return for user space */
913
914 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
915 task_t child_task;
916 proc_t child_proc;
917
918 /* Return to the parent */
919 child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
920 retval[0] = child_proc->p_pid;
921
922 /*
923 * Drop the signal lock on the child which was taken on our
924 * behalf by forkproc()/cloneproc() to prevent signals being
925 * received by the child in a partially constructed state.
926 */
927 proc_signalend(child_proc, 0);
928 proc_transend(child_proc, 0);
929
930 /* flag the fork has occurred */
931 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
932 DTRACE_PROC1(create, proc_t, child_proc);
933
934 #if CONFIG_DTRACE
935 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
936 (*dtrace_proc_waitfor_hook)(child_proc);
937 }
938 #endif
939
940 /* "Return" to the child */
941 task_clear_return_wait(get_threadtask(child_thread), TCRW_CLEAR_ALL_WAIT);
942
943 /* drop the extra references we got during the creation */
944 if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
945 task_deallocate(child_task);
946 }
947 thread_deallocate(child_thread);
948 }
949
950 return err;
951 }
952
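/*
 * Illustrative sketch (editor's addition, not part of xnu): the userland
 * consequence of the "Important" note above.  Because the child thread is
 * resumed before fork() returns to the parent, either side may be
 * scheduled first; portable code synchronizes explicitly (e.g. with
 * waitpid()) instead of relying on ordering.  Hypothetical example,
 * wrapped in #if 0 so it is not compiled as part of this file.
 */
#if 0
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: may run to completion before the parent is scheduled. */
		printf("child %d\n", (int)getpid());
		_exit(0);
	} else if (pid > 0) {
		/* Parent: wait explicitly rather than assuming it runs first. */
		int status;
		(void)waitpid(pid, &status, 0);
	} else {
		perror("fork");	/* e.g. EAGAIN when a process limit is hit */
	}
	return 0;
}
#endif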
953
954 /*
955 * cloneproc
956 *
957 * Description: Create a new process from a specified process.
958 *
959 * Parameters: parent_task The parent task to be cloned, or
960 * TASK_NULL if the new task is not to
961 * inherit the task characteristics or
962 * the VM characteristics of the parent
963 * parent_proc The parent process to be cloned
964 * inherit_memory True if the child is to inherit
965 * memory from the parent; if this is
966 * TRUE, then the parent_task must
967 * also be non-NULL
970 * memstat_internal Whether to track the process in the
971 * jetsam priority list (if configured)
972 *
973 * Returns: !NULL pointer to new child thread
974 * NULL Failure (unspecified)
975 *
976 * Note: On return, the newly created child process has the signal
977 * lock held to block delivery of signals to it, if called with lock set.
978 * fork() code needs to explicitly remove this lock before
979 * signals can be delivered
980 *
981 * In the case of bootstrap, this function can be called from
982 * bsd_utaskbootstrap() in order to bootstrap the first process;
983 * the net effect is to provide a uthread structure for the
984 * kernel process associated with the kernel task.
985 *
986 * XXX: Tristating using the value parent_task as the major key
987 * and inherit_memory as the minor key is something we should
988 * refactor later; we owe the current semantics, ultimately,
989 * to the semantics of task_create_internal. For now, we will
990 * live with this being somewhat awkward.
991 */
992 thread_t
993 cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
994 {
995 #if !CONFIG_MEMORYSTATUS
996 #pragma unused(memstat_internal)
997 #endif
998 task_t child_task;
999 proc_t child_proc;
1000 thread_t child_thread = NULL;
1001
1002 if ((child_proc = forkproc(parent_proc)) == NULL) {
1003 /* Failed to allocate new process */
1004 goto bad;
1005 }
1006
1007 /*
1008 * In the case where the parent_task is TASK_NULL (during the init path)
1009 * we make the assumption that the register size will be the same as the
1010 * address space size since there's no way to determine the possible
1011 * register size until an image is exec'd.
1012 *
1013 * The only architecture that has different address space and register sizes
1014 * (arm64_32) isn't being used within kernel-space, so the above assumption
1015 * always holds true for the init path.
1016 */
1017 const int parent_64bit_addr = parent_proc->p_flag & P_LP64;
1018 const int parent_64bit_data = (parent_task == TASK_NULL) ? parent_64bit_addr : task_get_64bit_data(parent_task);
1019
1020 child_thread = fork_create_child(parent_task,
1021 parent_coalitions,
1022 child_proc,
1023 inherit_memory,
1024 parent_64bit_addr,
1025 parent_64bit_data,
1026 FALSE);
1027
1028 if (child_thread == NULL) {
1029 /*
1030 * Failed to create thread; now we must deconstruct the new
1031 * process previously obtained from forkproc().
1032 */
1033 forkproc_free(child_proc);
1034 goto bad;
1035 }
1036
1037 child_task = get_threadtask(child_thread);
1038 if (parent_64bit_addr) {
1039 OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
1040 } else {
1041 OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
1042 }
1043
1044 #if CONFIG_MEMORYSTATUS
1045 if (memstat_internal) {
1046 proc_list_lock();
1047 child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
1048 proc_list_unlock();
1049 }
1050 #endif
1051
1052 /* make child visible */
1053 pinsertchild(parent_proc, child_proc);
1054
1055 /*
1056 * Make child runnable, set start time.
1057 */
1058 child_proc->p_stat = SRUN;
1059 bad:
1060 return child_thread;
1061 }
1062
1063
1064 /*
1065 * Destroy a process structure that resulted from a call to forkproc(), but
1066 * which must be returned to the system because of a subsequent failure
1067 * preventing it from becoming active.
1068 *
1069 * Parameters: p The incomplete process from forkproc()
1070 *
1071 * Returns: (void)
1072 *
1073 * Note: This function should only be used in an error handler following
1074 * a call to forkproc().
1075 *
1076 * Operations occur in reverse order of those in forkproc().
1077 */
1078 void
1079 forkproc_free(proc_t p)
1080 {
1081 #if CONFIG_PERSONAS
1082 persona_proc_drop(p);
1083 #endif /* CONFIG_PERSONAS */
1084
1085 #if PSYNCH
1086 pth_proc_hashdelete(p);
1087 #endif /* PSYNCH */
1088
1089 /* We hold the signal and transition locks; drop them */
1090 proc_signalend(p, 0);
1091 proc_transend(p, 0);
1092
1093 /*
1094 * If we have our own copy of the resource limits structure, we
1095 * need to free it. If it's a shared copy, we need to drop our
1096 * reference on it.
1097 */
1098 proc_limitdrop(p);
1099
1100 #if SYSV_SHM
1101 /* Need to drop references to the shared memory segment(s), if any */
1102 if (p->vm_shm) {
1103 /*
1104 * Use shmexec(): we have no address space, so no mappings
1105 *
1106 * XXX Yes, the routine is badly named.
1107 */
1108 shmexec(p);
1109 }
1110 #endif
1111
1112 /* Need to undo the effects of the fdcopy(), if any */
1113 fdfree(p);
1114
1115 /*
1116 * Drop the reference on a text vnode pointer, if any
1117 * XXX This code is broken in forkproc(); see <rdar://4256419>;
1118 * XXX if anyone ever uses this field, we will be extremely unhappy.
1119 */
1120 if (p->p_textvp) {
1121 vnode_rele(p->p_textvp);
1122 p->p_textvp = NULL;
1123 }
1124
1125 /* Update the audit session proc count */
1126 AUDIT_SESSION_PROCEXIT(p);
1127
1128 lck_mtx_destroy(&p->p_mlock, &proc_mlock_grp);
1129 lck_mtx_destroy(&p->p_fdmlock, &proc_fdmlock_grp);
1130 lck_mtx_destroy(&p->p_ucred_mlock, &proc_ucred_mlock_grp);
1131 #if CONFIG_DTRACE
1132 lck_mtx_destroy(&p->p_dtrace_sprlock, &proc_lck_grp);
1133 #endif
1134 lck_spin_destroy(&p->p_slock, &proc_slock_grp);
1135 lck_rw_destroy(&p->p_dirs_lock, &proc_dirslock_grp);
1136
1137 /* Release the credential reference */
1138 kauth_cred_t tmp_ucred = p->p_ucred;
1139 kauth_cred_unref(&tmp_ucred);
1140 p->p_ucred = tmp_ucred;
1141
1142 proc_list_lock();
1143 /* Decrement the count of processes in the system */
1144 nprocs--;
1145
1146 /* Take it out of process hash */
1147 LIST_REMOVE(p, p_hash);
1148
1149 proc_list_unlock();
1150
1151 thread_call_free(p->p_rcall);
1152
1153 /* Free allocated memory */
1154 zfree(proc_sigacts_zone, p->p_sigacts);
1155 p->p_sigacts = NULL;
1156 zfree(proc_stats_zone, p->p_stats);
1157 p->p_stats = NULL;
1158 if (p->p_subsystem_root_path) {
1159 zfree(ZV_NAMEI, p->p_subsystem_root_path);
1160 }
1161
1162 proc_checkdeadrefs(p);
1163 zfree(proc_zone, p);
1164 }
1165
1166
1167 /*
1168 * forkproc
1169 *
1170 * Description: Create a new process structure, given a parent process
1171 * structure.
1172 *
1173 * Parameters: parent_proc The parent process
1174 *
1175 * Returns: !NULL The new process structure
1176 * NULL Error (insufficient free memory)
1177 *
1178 * Note: When successful, the newly created process structure is
1179 * partially initialized; if a caller needs to deconstruct the
1180 * returned structure, they must call forkproc_free() to do so.
1181 */
1182 proc_t
1183 forkproc(proc_t parent_proc)
1184 {
1185 proc_t child_proc; /* Our new process */
1186 static int nextpid = 0, pidwrap = 0;
1187 static uint64_t nextuniqueid = 0;
1188 int error = 0;
1189 struct session *sessp;
1190 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());
1191 rlim_t rlimit_cpu_cur;
1192
1193 child_proc = zalloc_flags(proc_zone, Z_WAITOK | Z_ZERO);
1194 child_proc->p_stats = zalloc_flags(proc_stats_zone, Z_WAITOK | Z_ZERO);
1195 child_proc->p_sigacts = zalloc_flags(proc_sigacts_zone, Z_WAITOK);
1196
1197 /* allocate a callout for use by interval timers */
1198 child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
1199 if (child_proc->p_rcall == NULL) {
1200 zfree(proc_sigacts_zone, child_proc->p_sigacts);
1201 zfree(proc_stats_zone, child_proc->p_stats);
1202 zfree(proc_zone, child_proc);
1203 child_proc = NULL;
1204 goto bad;
1205 }
1206
1207
1208 /*
1209 * Find an unused PID.
1210 */
1211
1212 proc_list_lock();
1213
1214 nextpid++;
1215 retry:
1216 /*
1217 * If the process ID prototype has wrapped around,
1218 * restart somewhat above 0, as the low-numbered procs
1219 * tend to include daemons that don't exit.
1220 */
1221 if (nextpid >= PID_MAX) {
1222 nextpid = 100;
1223 pidwrap = 1;
1224 }
1225 if (pidwrap != 0) {
1226 /* the pid stays in the hash for both zombie and running state */
1227 if (pfind_locked(nextpid) != PROC_NULL) {
1228 nextpid++;
1229 goto retry;
1230 }
1231
1232 if (pgfind_internal(nextpid) != PGRP_NULL) {
1233 nextpid++;
1234 goto retry;
1235 }
1236 if (session_find_internal(nextpid) != SESSION_NULL) {
1237 nextpid++;
1238 goto retry;
1239 }
1240 }
1241 nprocs++;
1242 child_proc->p_pid = nextpid;
1243 child_proc->p_idversion = OSIncrementAtomic(&nextpidversion);
1244 /* kernel process is handcrafted and not from fork, so start from 1 */
1245 child_proc->p_uniqueid = ++nextuniqueid;
1246 #if 1
1247 if (child_proc->p_pid != 0) {
1248 if (pfind_locked(child_proc->p_pid) != PROC_NULL) {
1249 panic("proc in the list already\n");
1250 }
1251 }
1252 #endif
1253 /* Insert in the hash */
1254 child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE);
1255 LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
1256 proc_list_unlock();
1257
1258 if (child_proc->p_uniqueid == startup_serial_num_procs) {
1259 /*
1260 * Turn off startup serial logging now that we have reached
1261 * the defined number of startup processes.
1262 */
1263 startup_serial_logging_active = false;
1264 }
1265
1266 /*
1267 * We've identified the PID we are going to use; initialize the new
1268 * process structure.
1269 */
1270 child_proc->p_stat = SIDL;
1271 child_proc->p_pgrpid = PGRPID_DEAD;
1272
1273 /*
1274 * The zeroing of the proc was done at allocation time due to the
1275 * need for insertion into the hash. Copy the section that is to be copied
1276 * directly from the parent.
1277 */
1278 __nochk_bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
1279 (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy));
1280
1281 #if defined(HAS_APPLE_PAC)
1282 /*
1283 * The p_textvp and p_pgrp pointers are address-diversified by PAC, so we must
1284 * resign them here for the new proc
1285 */
1286 if (parent_proc->p_textvp) {
1287 child_proc->p_textvp = parent_proc->p_textvp;
1288 }
1289
1290 if (parent_proc->p_pgrp) {
1291 child_proc->p_pgrp = parent_proc->p_pgrp;
1292 }
1293 #endif /* defined(HAS_APPLE_PAC) */
1294
1295 child_proc->p_sessionid = parent_proc->p_sessionid;
1296
1297 /*
1298 * Some flags are inherited from the parent.
1299 * Duplicate sub-structures as needed.
1300 * Increase reference counts on shared objects.
1301 * The p_stats and p_sigacts substructs are set in vm_fork.
1302 */
1303 #if CONFIG_DELAY_IDLE_SLEEP
1304 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID | P_AFFINITY));
1305 #else /* CONFIG_DELAY_IDLE_SLEEP */
1306 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_SUGID));
1307 #endif /* CONFIG_DELAY_IDLE_SLEEP */
1308
1309 child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK));
1310
1311 child_proc->p_responsible_pid = parent_proc->p_responsible_pid;
1312
1313 /*
1314 * Note that if the current thread has an assumed identity, this
1315 * credential will be granted to the new process.
1316 */
1317 child_proc->p_ucred = kauth_cred_get_with_ref();
1318 /* update cred on proc */
1319 PROC_UPDATE_CREDS_ONPROC(child_proc);
1320 /* update audit session proc count */
1321 AUDIT_SESSION_PROCNEW(child_proc);
1322
1323 lck_mtx_init(&child_proc->p_mlock, &proc_mlock_grp, &proc_lck_attr);
1324 lck_mtx_init(&child_proc->p_fdmlock, &proc_fdmlock_grp, &proc_lck_attr);
1325 lck_mtx_init(&child_proc->p_ucred_mlock, &proc_ucred_mlock_grp, &proc_lck_attr);
1326 #if CONFIG_DTRACE
1327 lck_mtx_init(&child_proc->p_dtrace_sprlock, &proc_lck_grp, &proc_lck_attr);
1328 #endif
1329 lck_spin_init(&child_proc->p_slock, &proc_slock_grp, &proc_lck_attr);
1330 lck_rw_init(&child_proc->p_dirs_lock, &proc_dirslock_grp, &proc_lck_attr);
1331
1332 klist_init(&child_proc->p_klist);
1333
1334 if (child_proc->p_textvp != NULLVP) {
1335 /* bump references to the text vnode */
1336 /* Need to hold iocount across the ref call */
1337 if ((error = vnode_getwithref(child_proc->p_textvp)) == 0) {
1338 error = vnode_ref(child_proc->p_textvp);
1339 vnode_put(child_proc->p_textvp);
1340 }
1341
1342 if (error != 0) {
1343 child_proc->p_textvp = NULLVP;
1344 }
1345 }
1346
1347 /*
1348 * Copy the parent's per-process open file table to the child; if
1349 * there is a per-thread current working directory, set the child's
1350 * per-process current working directory to that instead of the
1351 * parent's.
1352 *
1353 * XXX may fail to copy descriptors to child
1354 */
1355 child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);
1356
1357 #if SYSV_SHM
1358 if (parent_proc->vm_shm) {
1359 /* XXX may fail to attach shm to child */
1360 (void)shmfork(parent_proc, child_proc);
1361 }
1362 #endif
1363
1364 /*
1365 * Child inherits the parent's plimit
1366 */
1367 proc_limitfork(parent_proc, child_proc);
1368
1369 rlimit_cpu_cur = proc_limitgetcur(child_proc, RLIMIT_CPU, TRUE);
1370 if (rlimit_cpu_cur != RLIM_INFINITY) {
1371 child_proc->p_rlim_cpu.tv_sec = (rlimit_cpu_cur > __INT_MAX__) ? __INT_MAX__ : rlimit_cpu_cur;
1372 }
1373
1374 /* Initialize new process stats, including start time */
1375 /* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
1376 microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);
1377
1378 if (parent_proc->p_sigacts != NULL) {
1379 (void)memcpy(child_proc->p_sigacts,
1380 parent_proc->p_sigacts, sizeof *child_proc->p_sigacts);
1381 } else {
1382 (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts);
1383 }
1384
1385 sessp = proc_session(parent_proc);
1386 if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT) {
1387 OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag);
1388 }
1389 session_rele(sessp);
1390
1391 /*
1392 * block all signals from reaching the process.
1393 * no transition race should be occurring with the child yet,
1394 * but indicate that the process is in (the creation) transition.
1395 */
1396 proc_signalstart(child_proc, 0);
1397 proc_transstart(child_proc, 0, 0);
1398
1399 child_proc->p_pcaction = 0;
1400
1401 TAILQ_INIT(&child_proc->p_uthlist);
1402 TAILQ_INIT(&child_proc->p_aio_activeq);
1403 TAILQ_INIT(&child_proc->p_aio_doneq);
1404
1405 /* Inherit the parent flags for code sign */
1406 child_proc->p_csflags = (parent_proc->p_csflags & ~CS_KILLED);
1407
1408 /*
1409 * Copy work queue information
1410 *
1411 * Note: This should probably only happen in the case where we are
1412 * creating a child that is a copy of the parent; since this
1413 * routine is called in the non-duplication case of vfork()
1414 * or posix_spawn(), this information should likely not
1415 * be duplicated.
1416 *
1417 * <rdar://6640553> Work queue pointers that no longer point to code
1418 */
1419 child_proc->p_wqthread = parent_proc->p_wqthread;
1420 child_proc->p_threadstart = parent_proc->p_threadstart;
1421 child_proc->p_pthsize = parent_proc->p_pthsize;
1422 if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
1423 child_proc->p_lflag |= P_LREGISTER;
1424 }
1425 child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
1426 child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
1427 child_proc->p_dispatchqueue_label_offset = parent_proc->p_dispatchqueue_label_offset;
1428 child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
1429 child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
1430 child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
1431 #if PSYNCH
1432 pth_proc_hashinit(child_proc);
1433 #endif /* PSYNCH */
1434
1435 #if CONFIG_PERSONAS
1436 child_proc->p_persona = NULL;
1437 error = persona_proc_inherit(child_proc, parent_proc);
1438 if (error != 0) {
1439 printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
1440 forkproc_free(child_proc);
1441 child_proc = NULL;
1442 goto bad;
1443 }
1444 #endif
1445
1446 #if CONFIG_MEMORYSTATUS
1447 /* Memorystatus init */
1448 child_proc->p_memstat_state = 0;
1449 child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
1450 child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
1451 child_proc->p_memstat_assertionpriority = 0;
1452 child_proc->p_memstat_userdata = 0;
1453 child_proc->p_memstat_idle_start = 0;
1454 child_proc->p_memstat_idle_delta = 0;
1455 child_proc->p_memstat_memlimit = 0;
1456 child_proc->p_memstat_memlimit_active = 0;
1457 child_proc->p_memstat_memlimit_inactive = 0;
1458 child_proc->p_memstat_relaunch_flags = P_MEMSTAT_RELAUNCH_UNKNOWN;
1459 #if CONFIG_FREEZE
1460 child_proc->p_memstat_freeze_sharedanon_pages = 0;
1461 #endif
1462 child_proc->p_memstat_dirty = 0;
1463 child_proc->p_memstat_idledeadline = 0;
1464 #endif /* CONFIG_MEMORYSTATUS */
1465
1466 if (parent_proc->p_subsystem_root_path) {
1467 size_t parent_length = strlen(parent_proc->p_subsystem_root_path) + 1;
1468 assert(parent_length <= MAXPATHLEN);
1469 child_proc->p_subsystem_root_path = zalloc_flags(ZV_NAMEI,
1470 Z_WAITOK | Z_ZERO);
1471 memcpy(child_proc->p_subsystem_root_path, parent_proc->p_subsystem_root_path, parent_length);
1472 }
1473
1474 bad:
1475 return child_proc;
1476 }
1477
1478 void
1479 proc_lock(proc_t p)
1480 {
1481 LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
1482 lck_mtx_lock(&p->p_mlock);
1483 }
1484
1485 void
1486 proc_unlock(proc_t p)
1487 {
1488 lck_mtx_unlock(&p->p_mlock);
1489 }
1490
1491 void
1492 proc_spinlock(proc_t p)
1493 {
1494 lck_spin_lock_grp(&p->p_slock, &proc_slock_grp);
1495 }
1496
1497 void
1498 proc_spinunlock(proc_t p)
1499 {
1500 lck_spin_unlock(&p->p_slock);
1501 }
1502
1503 void
1504 proc_list_lock(void)
1505 {
1506 lck_mtx_lock(&proc_list_mlock);
1507 }
1508
1509 void
1510 proc_list_unlock(void)
1511 {
1512 lck_mtx_unlock(&proc_list_mlock);
1513 }
1514
1515 void
1516 proc_ucred_lock(proc_t p)
1517 {
1518 lck_mtx_lock(&p->p_ucred_mlock);
1519 }
1520
1521 void
1522 proc_ucred_unlock(proc_t p)
1523 {
1524 lck_mtx_unlock(&p->p_ucred_mlock);
1525 }
1526
1527 void *
1528 uthread_alloc(task_t task, thread_t thread, int noinherit)
1529 {
1530 proc_t p;
1531 uthread_t uth;
1532 uthread_t uth_parent;
1533 void *ut;
1534
1535 ut = zalloc_flags(uthread_zone, Z_WAITOK | Z_ZERO);
1536
1537 p = (proc_t) get_bsdtask_info(task);
1538 uth = (uthread_t)ut;
1539 uth->uu_thread = thread;
1540
1541 lck_spin_init(&uth->uu_rethrottle_lock, &rethrottle_lock_grp,
1542 LCK_ATTR_NULL);
1543
1544 /*
1545 * Thread inherits credential from the creating thread, if both
1546 * are in the same task.
1547 *
1548 * If the creating thread has no credential or is from another
1549 * task we can leave the new thread credential NULL. If it needs
1550 * one later, it will be lazily assigned from the task's process.
1551 */
1552 uth_parent = (uthread_t)get_bsdthread_info(current_thread());
1553 if ((noinherit == 0) && task == current_task() &&
1554 uth_parent != NULL &&
1555 IS_VALID_CRED(uth_parent->uu_ucred)) {
1556 /*
1557 * XXX The new thread is, in theory, being created in context
1558 * XXX of parent thread, so a direct reference to the parent
1559 * XXX is OK.
1560 */
1561 kauth_cred_ref(uth_parent->uu_ucred);
1562 uth->uu_ucred = uth_parent->uu_ucred;
1563 /* the credential we just inherited is an assumed credential */
1564 if (uth_parent->uu_flag & UT_SETUID) {
1565 uth->uu_flag |= UT_SETUID;
1566 }
1567 } else {
1568 /* sometimes workqueue threads are created outside of task context */
1569 if ((task != kernel_task) && (p != PROC_NULL)) {
1570 uth->uu_ucred = kauth_cred_proc_ref(p);
1571 } else {
1572 uth->uu_ucred = NOCRED;
1573 }
1574 }
1575
1576
1577 if ((task != kernel_task) && p) {
1578 proc_lock(p);
1579 if (noinherit != 0) {
1580 /* workq threads will not inherit masks */
1581 uth->uu_sigmask = ~workq_threadmask;
1582 } else if (uth_parent) {
1583 if (uth_parent->uu_flag & UT_SAS_OLDMASK) {
1584 uth->uu_sigmask = uth_parent->uu_oldmask;
1585 } else {
1586 uth->uu_sigmask = uth_parent->uu_sigmask;
1587 }
1588 }
1589 uth->uu_context.vc_thread = thread;
1590 /*
1591 * Do not add the uthread to proc uthlist for exec copy task,
1592 * since it does not hold a ref on proc.
1593 */
1594 if (!task_is_exec_copy(task)) {
1595 TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
1596 }
1597 proc_unlock(p);
1598
1599 #if CONFIG_DTRACE
1600 if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
1601 uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
1602 }
1603 #endif
1604 }
1605
1606 return ut;
1607 }
1608
1609 /*
1610 * This routine frees the thread name field of the uthread_t structure. Split out of
1611 * uthread_cleanup() so thread name does not get deallocated while generating a corpse fork.
1612 */
1613 void
1614 uthread_cleanup_name(void *uthread)
1615 {
1616 uthread_t uth = (uthread_t)uthread;
1617
1618 /*
1619 * <rdar://17834538>
1620 * Set pth_name to NULL before calling free().
1621 * Previously there was a race condition in the
1622 * case this code was executing during a stackshot
1623 * where the stackshot could try to copy pth_name
1624 * after it had been freed and before it was marked
1625 * as null.
1626 */
1627 if (uth->pth_name != NULL) {
1628 void *pth_name = uth->pth_name;
1629 uth->pth_name = NULL;
1630 kfree(pth_name, MAXTHREADNAMESIZE);
1631 }
1632 return;
1633 }
1634
1635 /*
1636 * This routine frees all the BSD context in uthread except the credential.
1637 * It does not free the uthread structure itself.
1638 */
1639 void
1640 uthread_cleanup(task_t task, void *uthread, void * bsd_info)
1641 {
1642 uthread_t uth = (uthread_t)uthread;
1643 proc_t p = (proc_t)bsd_info;
1644
1645 #if PROC_REF_DEBUG
1646 if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
1647 panic("uthread_cleanup called for uthread %p with uu_proc_refcount != 0", uthread);
1648 }
1649 #endif
1650
1651 if (uth->uu_lowpri_window || uth->uu_throttle_info) {
1652 /*
1653 * task is marked as a low priority I/O type
1654 * and we've somehow managed to not dismiss the throttle
1655 * through the normal exit paths back to user space...
1656 * no need to throttle this thread since it's going away,
1657 * but we do need to update our bookkeeping w.r.t. throttled threads
1658 *
1659 * Calling this routine will clean up any throttle info reference
1660 * still in use by the thread.
1661 */
1662 throttle_lowpri_io(0);
1663 }
1664 /*
1665 * Per-thread audit state should never last beyond system
1666 * call return. Since we don't audit the thread creation/
1667 * removal, the thread state pointer should never be
1668 * non-NULL when we get here.
1669 */
1670 assert(uth->uu_ar == NULL);
1671
1672 if (uth->uu_kqr_bound) {
1673 kqueue_threadreq_unbind(p, uth->uu_kqr_bound);
1674 }
1675
1676 if (uth->uu_select.nbytes) {
1677 select_cleanup_uthread(&uth->uu_select);
1678 }
1679
1680 if (uth->uu_cdir) {
1681 vnode_rele(uth->uu_cdir);
1682 uth->uu_cdir = NULLVP;
1683 }
1684
1685 if (uth->uu_wqset) {
1686 if (waitq_set_is_valid(uth->uu_wqset)) {
1687 waitq_set_deinit(uth->uu_wqset);
1688 }
1689 kheap_free(KHEAP_DEFAULT, uth->uu_wqset, uth->uu_wqstate_sz);
1690 uth->uu_wqset = NULL;
1691 uth->uu_wqstate_sz = 0;
1692 }
1693
1694 os_reason_free(uth->uu_exit_reason);
1695
1696 if ((task != kernel_task) && p) {
1697 if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
1698 vfork_exit_internal(uth->uu_proc, 0, 1);
1699 }
1700 /*
1701 * Remove the thread from the process list and
1702 * transfer [appropriate] pending signals to the process.
1703 * Do not remove the uthread from proc uthlist for exec
1704 * copy task, since it does not have a ref on proc and
1705 * would not have been added to the list.
1706 */
1707 if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
1708 proc_lock(p);
1709
1710 TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
1711 p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
1712 proc_unlock(p);
1713 }
1714 #if CONFIG_DTRACE
1715 struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
1716 uth->t_dtrace_scratch = NULL;
1717 if (tmpptr != NULL && !task_is_exec_copy(task)) {
1718 dtrace_ptss_release_entry(p, tmpptr);
1719 }
1720 #endif
1721 }
1722 }
1723
1724 /* This routine releases the credential stored in uthread */
1725 void
1726 uthread_cred_free(void *uthread)
1727 {
1728 uthread_t uth = (uthread_t)uthread;
1729
1730 /* release the credential reference held by the uthread, if any */
1731 if (IS_VALID_CRED(uth->uu_ucred)) {
1732 kauth_cred_t oldcred = uth->uu_ucred;
1733 uth->uu_ucred = NOCRED;
1734 kauth_cred_unref(&oldcred);
1735 }
1736 }
1737
1738 /* This routine frees the uthread structure held in thread structure */
1739 void
1740 uthread_zone_free(void *uthread)
1741 {
1742 uthread_t uth = (uthread_t)uthread;
1743
1744 if (uth->t_tombstone) {
1745 kfree(uth->t_tombstone, sizeof(struct doc_tombstone));
1746 uth->t_tombstone = NULL;
1747 }
1748
1749 lck_spin_destroy(&uth->uu_rethrottle_lock, &rethrottle_lock_grp);
1750
1751 uthread_cleanup_name(uthread);
1752 /* and free the uthread itself */
1753 zfree(uthread_zone, uthread);
1754 }