1 /*
2 * Copyright (c) 2000-2007, 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74 /*
75 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
76 * support for mandatory and extensible security protections. This notice
77 * is included in support of clause 2.2 (b) of the Apple Public License,
78 * Version 2.0.
79 */
80
81 #include <kern/assert.h>
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/filedesc.h>
85 #include <sys/kernel.h>
86 #include <sys/malloc.h>
87 #include <sys/proc_internal.h>
88 #include <sys/kauth.h>
89 #include <sys/user.h>
90 #include <sys/reason.h>
91 #include <sys/resourcevar.h>
92 #include <sys/vnode_internal.h>
93 #include <sys/file_internal.h>
94 #include <sys/acct.h>
95 #include <sys/codesign.h>
96 #include <sys/sysproto.h>
97 #if CONFIG_PERSONAS
98 #include <sys/persona.h>
99 #endif
100 #include <sys/doc_tombstone.h>
101 #if CONFIG_DTRACE
102 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
103 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
104 extern void dtrace_proc_fork(proc_t, proc_t, int);
105
106 /*
107 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
108 * we will store its value before actually calling it.
109 */
110 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
111
112 #include <sys/dtrace_ptss.h>
113 #endif
114
115 #include <security/audit/audit.h>
116
117 #include <mach/mach_types.h>
118 #include <kern/coalition.h>
119 #include <kern/kern_types.h>
120 #include <kern/kalloc.h>
121 #include <kern/mach_param.h>
122 #include <kern/task.h>
123 #include <kern/thread.h>
124 #include <kern/thread_call.h>
125 #include <kern/zalloc.h>
126
127 #include <os/log.h>
128
131 #if CONFIG_MACF
132 #include <security/mac_framework.h>
133 #include <security/mac_mach_internal.h>
134 #endif
135
136 #include <vm/vm_map.h>
137 #include <vm/vm_protos.h>
138 #include <vm/vm_shared_region.h>
139
140 #include <sys/shm_internal.h> /* for shmfork() */
141 #include <mach/task.h> /* for thread_create() */
142 #include <mach/thread_act.h> /* for thread_resume() */
143
144 #include <sys/sdt.h>
145
146 #if CONFIG_MEMORYSTATUS
147 #include <sys/kern_memorystatus.h>
148 #endif
149
150 /* XXX routines which should have Mach prototypes, but don't */
151 void thread_set_parent(thread_t parent, int pid);
152 extern void act_thread_catt(void *ctx);
153 void thread_set_child(thread_t child, int pid);
154 void *act_thread_csave(void);
155 extern boolean_t task_is_exec_copy(task_t);
156 int nextpidversion = 0;
157
158
159 thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
160 proc_t forkproc(proc_t);
161 void forkproc_free(proc_t);
162 thread_t fork_create_child(task_t parent_task,
163 coalition_t *parent_coalitions,
164 proc_t child,
165 int inherit_memory,
166 int is_64bit_addr,
167 int is_64bit_data,
168 int in_exec);
169 void proc_vfork_begin(proc_t parent_proc);
170 void proc_vfork_end(proc_t parent_proc);
171
172 #define DOFORK 0x1 /* fork() system call */
173 #define DOVFORK 0x2 /* vfork() system call */
174
175 /*
176 * proc_vfork_begin
177 *
178 * Description: start a vfork on a process
179 *
180 * Parameters: parent_proc process (re)entering vfork state
181 *
182 * Returns: (void)
183 *
184 * Notes: Although this function increments a count, a count in
185 * excess of 1 is not currently supported. According to the
186 * POSIX standard, calling anything other than execve() or
187 * _exit() following a vfork(), including calling vfork()
188 * itself again, will result in undefined behaviour
189 */
190 void
191 proc_vfork_begin(proc_t parent_proc)
192 {
193 proc_lock(parent_proc);
194 parent_proc->p_lflag |= P_LVFORK;
195 parent_proc->p_vforkcnt++;
196 proc_unlock(parent_proc);
197 }
198
199 /*
200 * proc_vfork_end
201 *
202 * Description: stop a vfork on a process
203 *
204 * Parameters: parent_proc process leaving vfork state
205 *
206 * Returns: (void)
207 *
208 * Notes: Decrements the count; currently, reentrancy of vfork()
209 * is unsupported on the current process
210 */
211 void
212 proc_vfork_end(proc_t parent_proc)
213 {
214 proc_lock(parent_proc);
215 parent_proc->p_vforkcnt--;
216 if (parent_proc->p_vforkcnt < 0) {
217 panic("vfork cnt is -ve");
218 }
219 if (parent_proc->p_vforkcnt == 0) {
220 parent_proc->p_lflag &= ~P_LVFORK;
221 }
222 proc_unlock(parent_proc);
223 }
224
225
226 /*
227 * vfork
228 *
229 * Description: vfork system call
230 *
231 * Parameters: void [no arguments]
232 *
233 * Retval: 0 (to child process)
234 * !0 pid of child (to parent process)
235 * -1 error (see "Returns:")
236 *
237 * Returns: EAGAIN Administrative limit reached
238 * EINVAL vfork() called during vfork()
239 * ENOMEM Failed to allocate new process
240 *
241 * Note: After a successful call to this function, the parent process
242 * has its task, thread, and uthread lent to the child process,
243 * and control is returned to the caller; if this function is
244 * invoked as a system call, the return is to user space, and
245 * is effectively running on the child process.
246 *
247 * Subsequent calls that operate on process state are permitted,
248 * though discouraged, and will operate on the child process; any
249 * operations on the task, thread, or uthread will result in
250 * changes to the parent state and, if inheritable, will also
251 * affect the child state once a task, thread, and uthread are
252 * realized for the child process at execve() time. Given
253 * this, it's recommended that callers use the posix_spawn() call
254 * instead.
255 *
256 * BLOCK DIAGRAM OF VFORK
257 *
258 * Before:
259 *
260 * ,----------------. ,-------------.
261 * | | task | |
262 * | parent_thread | ------> | parent_task |
263 * | | <.list. | |
264 * `----------------' `-------------'
265 * uthread | ^ bsd_info | ^
266 * v | vc_thread v | task
267 * ,----------------. ,-------------.
268 * | | | |
269 * | parent_uthread | <.list. | parent_proc | <-- current_proc()
270 * | | | |
271 * `----------------' `-------------'
272 * uu_proc |
273 * v
274 * NULL
275 *
276 * After:
277 *
278 * ,----------------. ,-------------.
279 * | | task | |
280 * ,----> | parent_thread | ------> | parent_task |
281 * | | | <.list. | |
282 * | `----------------' `-------------'
283 * | uthread | ^ bsd_info | ^
284 * | v | vc_thread v | task
285 * | ,----------------. ,-------------.
286 * | | | | |
287 * | | parent_uthread | <.list. | parent_proc |
288 * | | | | |
289 * | `----------------' `-------------'
290 * | uu_proc | . list
291 * | v v
292 * | ,----------------.
293 * `----- | |
294 * p_vforkact | child_proc | <-- current_proc()
295 * | |
296 * `----------------'
297 */
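/*
 * Illustrative userland usage (a sketch, not part of this file; 'path',
 * 'argv' and 'envp' are hypothetical): per the notes above, the child
 * returned by vfork() may only call execve() or _exit() before the parent
 * resumes, e.g.
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execve(path, argv, envp);	(replaces the borrowed image)
 *		_exit(127);			(reached only if execve() fails)
 *	}
 *	(the parent resumes only after the child execs or exits)
 */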
298 int
299 vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval)
300 {
301 thread_t child_thread;
302 int err;
303
304 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_VFORK, NULL)) != 0) {
305 retval[1] = 0;
306 } else {
307 uthread_t ut = get_bsdthread_info(current_thread());
308 proc_t child_proc = ut->uu_proc;
309
310 retval[0] = child_proc->p_pid;
311 retval[1] = 1; /* flag child return for user space */
312
313 /*
314 * Drop the signal lock on the child which was taken on our
315 * behalf by forkproc()/cloneproc() to prevent signals being
316 * received by the child in a partially constructed state.
317 */
318 proc_signalend(child_proc, 0);
319 proc_transend(child_proc, 0);
320
321 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
322 DTRACE_PROC1(create, proc_t, child_proc);
323 ut->uu_flag &= ~UT_VFORKING;
324 }
325
326 return err;
327 }
328
329
330 /*
331 * fork1
332 *
333 * Description: common code used by all new process creation other than the
334 * bootstrap of the initial process on the system
335 *
336 * Parameters: parent_proc parent process of the process being created
337 * child_threadp pointer to location to receive the
338 * Mach thread_t of the child process
339 * created
340 * kind kind of creation being requested
341 * coalitions if spawn, the set of coalitions the
342 * child process should join, or NULL to
343 * inherit the parent's. On non-spawns,
344 * this param is ignored and the child
345 * always inherits the parent's
346 * coalitions.
347 *
348 * Notes: Permissible values for 'kind':
349 *
350 * PROC_CREATE_FORK Create a complete process which will
351 * return actively running in both the
352 * parent and the child; the child copies
353 * the parent address space.
354 * PROC_CREATE_SPAWN Create a complete process which will
355 * return actively running in the parent
356 * only after returning actively running
357 * in the child; the child address space
358 * is newly created by an image activator,
359 * after which the child is run.
360 * PROC_CREATE_VFORK Creates a partial process which will
361 * borrow the parent task, thread, and
362 * uthread to return running in the child;
363 * the child address space and other parts
364 * are lazily created at execve() time, or
365 * the child is terminated, and the parent
366 * does not actively run until that
367 * happens.
368 *
369 * At first it may seem strange that we return the child thread
370 * address rather than process structure, since the process is
371 * the only part guaranteed to be "new"; however, since we do
372 * not actually adjust other references between Mach and BSD (see
373 * the block diagram above the implementation of vfork()), this
374 * is the only method which guarantees us the ability to get
375 * back to the other information.
376 */
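/*
 * For orientation (a non-authoritative summary; the first two callers are
 * in this file, the third lives in bsd/kern/kern_exec.c):
 *
 *	fork()        -> fork1(parent, &thread, PROC_CREATE_FORK,  NULL)
 *	vfork()       -> fork1(parent, &thread, PROC_CREATE_VFORK, NULL)
 *	posix_spawn() -> fork1(parent, &thread, PROC_CREATE_SPAWN, coalitions)
 */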
377 int
378 fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
379 {
380 thread_t parent_thread = (thread_t)current_thread();
381 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread);
382 proc_t child_proc = NULL; /* set in switch, but compiler... */
383 thread_t child_thread = NULL;
384 uid_t uid;
385 int count;
386 int err = 0;
387 int spawn = 0;
388
389 /*
390 * Although process entries are dynamically created, we still keep
391 * a global limit on the maximum number we will create. Don't allow
392 * a nonprivileged user to use the last process; don't let root
393 * exceed the limit. The variable nprocs is the current number of
394 * processes, maxproc is the limit.
395 */
396 uid = kauth_getruid();
397 proc_list_lock();
398 if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
399 #if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
400 /*
401 * On the development kernel, panic so that the fact that we hit
402 * the process limit is obvious, as this may very well wedge the
403 * system.
404 */
405 panic("The process table is full; parent pid=%d", parent_proc->p_pid);
406 #endif
407 proc_list_unlock();
408 tablefull("proc");
409 return EAGAIN;
410 }
411 proc_list_unlock();
412
413 /*
414 * Increment the count of procs running with this uid. Don't allow
415 * a nonprivileged user to exceed their current limit, which is
416 * always less than what an rlim_t can hold.
417 * (locking protection is provided by list lock held in chgproccnt)
418 */
419 count = chgproccnt(uid, 1);
420 if (uid != 0 &&
421 (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
422 #if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
423 /*
424 * On the development kernel, panic so that the fact that we hit
425 * the per user process limit is obvious. This may be less dire
426 * than hitting the global process limit, but we cannot rely on
427 * that.
428 */
429 panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid);
430 #endif
431 err = EAGAIN;
432 goto bad;
433 }
434
435 #if CONFIG_MACF
436 /*
437 * Determine if MAC policies applied to the process will allow
438 * it to fork. This is an advisory-only check.
439 */
440 err = mac_proc_check_fork(parent_proc);
441 if (err != 0) {
442 goto bad;
443 }
444 #endif
445
446 switch (kind) {
447 case PROC_CREATE_VFORK:
448 /*
449 * Prevent a vfork while we are in vfork(); we should
450 * likely also prevent a fork here as well, and this
451 * check should then be outside the switch statement,
452 * since the proc struct contents will copy from the
453 * child and the task/thread/uthread from the parent in
454 * that case. We do not support vfork() in vfork()
455 * because we don't have to; the same non-requirement
456 * is true of both fork() and posix_spawn() and any
457 * call other than execve() and _exit(), but we've
458 * been historically lenient, so we continue to be so
459 * (for now).
460 *
461 * <rdar://6640521> Probably a source of random panics
462 */
463 if (parent_uthread->uu_flag & UT_VFORK) {
464 printf("fork1 called within vfork by %s\n", parent_proc->p_comm);
465 err = EINVAL;
466 goto bad;
467 }
468
469 /*
470 * Flag us in progress; if we chose to support vfork() in
471 * vfork(), we would chain our parent at this point (in
472 * effect, a stack push). We don't, since we actually want
473 * to disallow everything not specified in the standard
474 */
475 proc_vfork_begin(parent_proc);
476
477 /* The newly created process comes with signal lock held */
478 if ((child_proc = forkproc(parent_proc)) == NULL) {
479 /* Failed to allocate new process */
480 proc_vfork_end(parent_proc);
481 err = ENOMEM;
482 goto bad;
483 }
484
485 // XXX BEGIN: wants to move to be common code (and safe)
486 #if CONFIG_MACF
487 /*
488 * allow policies to associate the credential/label that
489 * we referenced from the parent ... with the child
490 * JMM - this really isn't safe, as we can drop that
491 * association without informing the policy in other
492 * situations (keep long enough to get policies changed)
493 */
494 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
495 #endif
496
497 /*
498 * Propagate change of PID - may get new cred if auditing.
499 *
500 * NOTE: This has no effect in the vfork case, since
501 * child_proc->task != current_task(), but we duplicate it
502 * because this is probably, ultimately, wrong, since we
503 * will be running in the "child" which is the parent task
504 * with the wrong token until we get to the execve() or
505 * _exit() call; a lot of "undefined" can happen before
506 * that.
507 *
508 * <rdar://6640530> disallow everything but execve()/_exit()?
509 */
510 set_security_token(child_proc);
511
512 AUDIT_ARG(pid, child_proc->p_pid);
513
514 // XXX END: wants to move to be common code (and safe)
515
516 /*
517 * BORROW PARENT TASK, THREAD, UTHREAD FOR CHILD
518 *
519 * Note: this is where we would "push" state instead of setting
520 * it for nested vfork() support (see proc_vfork_end() for
521 * description if issues here).
522 */
523 child_proc->task = parent_proc->task;
524
525 child_proc->p_lflag |= P_LINVFORK;
526 child_proc->p_vforkact = parent_thread;
527 child_proc->p_stat = SRUN;
528
529 /*
530 * Until UT_VFORKING is cleared at the end of the vfork
531 * syscall, the process identity of this thread is slightly
532 * murky.
533 *
534 * As long as UT_VFORK and its associated field (uu_proc)
535 * is set, current_proc() will always return the child process.
536 *
537 * However dtrace_proc_selfpid() returns the parent pid to
538 * ensure that e.g. the proc:::create probe actions accrue
539 * to the parent. (Otherwise the child magically seems to
540 * have created itself!)
541 */
542 parent_uthread->uu_flag |= UT_VFORK | UT_VFORKING;
543 parent_uthread->uu_proc = child_proc;
544 parent_uthread->uu_userstate = (void *)act_thread_csave();
545 parent_uthread->uu_vforkmask = parent_uthread->uu_sigmask;
546
547 /* temporarily drop thread-set-id state */
548 if (parent_uthread->uu_flag & UT_SETUID) {
549 parent_uthread->uu_flag |= UT_WASSETUID;
550 parent_uthread->uu_flag &= ~UT_SETUID;
551 }
552
553 /* blow thread state information */
554 /* XXX is this actually necessary, given syscall return? */
555 thread_set_child(parent_thread, child_proc->p_pid);
556
557 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
558
559 /*
560 * Preserve synchronization semantics of vfork. If
561 * waiting for child to exec or exit, set P_PPWAIT
562 * on child, and sleep on our proc (in case of exit).
563 */
564 child_proc->p_lflag |= P_LPPWAIT;
565 pinsertchild(parent_proc, child_proc); /* set visible */
566
567 break;
568
569 case PROC_CREATE_SPAWN:
570 /*
571 * A spawned process differs from a forked process in that
572 * the spawned process does not carry around the parent's
573 * baggage with regard to address space copying, dtrace,
574 * and so on.
575 */
576 spawn = 1;
577
578 /* FALLSTHROUGH */
579
580 case PROC_CREATE_FORK:
581 /*
582 * When we clone the parent process, we are going to inherit
583 * its task attributes and memory, since when we fork, we
584 * will, in effect, create a duplicate of it, with only minor
585 * differences. Contrarily, spawned processes do not inherit.
586 */
587 if ((child_thread = cloneproc(parent_proc->task,
588 spawn ? coalitions : NULL,
589 parent_proc,
590 spawn ? FALSE : TRUE,
591 FALSE)) == NULL) {
592 /* Failed to create thread */
593 err = EAGAIN;
594 goto bad;
595 }
596
597 /* copy current thread state into the child thread (only for fork) */
598 if (!spawn) {
599 thread_dup(child_thread);
600 }
601
602 /* child_proc = child_thread->task->proc; */
603 child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));
604
605 // XXX BEGIN: wants to move to be common code (and safe)
606 #if CONFIG_MACF
607 /*
608 * allow policies to associate the credential/label that
609 * we referenced from the parent ... with the child
610 * JMM - this really isn't safe, as we can drop that
611 * association without informing the policy in other
612 * situations (keep long enough to get policies changed)
613 */
614 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
615 #endif
616
617 /*
618 * Propagate change of PID - may get new cred if auditing.
619 *
620 * NOTE: This has no effect in the vfork case, since
621 * child_proc->task != current_task(), but we duplicate it
622 * because this is probably, ultimately, wrong, since we
623 * will be running in the "child" which is the parent task
624 * with the wrong token until we get to the execve() or
625 * _exit() call; a lot of "undefined" can happen before
626 * that.
627 *
628 * <rdar://6640530> disallow everything but execve()/_exit()?
629 */
630 set_security_token(child_proc);
631
632 AUDIT_ARG(pid, child_proc->p_pid);
633
634 // XXX END: wants to move to be common code (and safe)
635
636 /*
637 * Blow thread state information; this is what gives the child
638 * process its "return" value from a fork() call.
639 *
640 * Note: this should probably move to fork() proper, since it
641 * is not relevant to spawn, and the value won't matter
642 * until we resume the child there. If you are in here
643 * refactoring code, consider doing this at the same time.
644 */
645 thread_set_child(child_thread, child_proc->p_pid);
646
647 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
648
649 #if CONFIG_DTRACE
650 dtrace_proc_fork(parent_proc, child_proc, spawn);
651 #endif /* CONFIG_DTRACE */
652 if (!spawn) {
653 /*
654 * Of note, we need to initialize the bank context behind
655 * the protection of the proc_trans lock to prevent a race with exit.
656 */
657 task_bank_init(get_threadtask(child_thread));
658 }
659
660 break;
661
662 default:
663 panic("fork1 called with unknown kind %d", kind);
664 break;
665 }
666
667
668 /* return the thread pointer to the caller */
669 *child_threadp = child_thread;
670
671 bad:
672 /*
673 * In the error case, we return a 0 value for the returned pid (but
674 * it is ignored in the trampoline due to the error return); this
675 * is probably not necessary.
676 */
677 if (err) {
678 (void)chgproccnt(uid, -1);
679 }
680
681 return err;
682 }
683
684
685 /*
686 * vfork_return
687 *
688 * Description: "Return" to the parent vfork thread following execve()/_exit();
689 * this is done by reassociating the parent process structure
690 * with the task, thread, and uthread.
691 *
692 * Refer to the ASCII art above vfork() to figure out the
693 * state we're undoing.
694 *
695 * Parameters: child_proc Child process
696 * retval System call return value array
697 * rval Return value to present to parent
698 *
699 * Returns: void
700 *
701 * Notes: The caller resumes or exits the parent, as appropriate, after
702 * calling this function.
703 */
704 void
705 vfork_return(proc_t child_proc, int32_t *retval, int rval)
706 {
707 task_t parent_task = get_threadtask(child_proc->p_vforkact);
708 proc_t parent_proc = get_bsdtask_info(parent_task);
709 thread_t th = current_thread();
710 uthread_t uth = get_bsdthread_info(th);
711
712 act_thread_catt(uth->uu_userstate);
713
714 /* clear vfork state in parent proc structure */
715 proc_vfork_end(parent_proc);
716
717 /* REPATRIATE PARENT TASK, THREAD, UTHREAD */
718 uth->uu_userstate = 0;
719 uth->uu_flag &= ~UT_VFORK;
720 /* restore thread-set-id state */
721 if (uth->uu_flag & UT_WASSETUID) {
722 uth->uu_flag |= UT_SETUID;
723 uth->uu_flag &= ~UT_WASSETUID;
724 }
725 uth->uu_proc = 0;
726 uth->uu_sigmask = uth->uu_vforkmask;
727
728 proc_lock(child_proc);
729 child_proc->p_lflag &= ~P_LINVFORK;
730 child_proc->p_vforkact = 0;
731 proc_unlock(child_proc);
732
733 thread_set_parent(th, rval);
734
735 if (retval) {
736 retval[0] = rval;
737 retval[1] = 0; /* mark parent */
738 }
739 }
740
741
742 /*
743 * fork_create_child
744 *
745 * Description: Common operations associated with the creation of a child
746 * process
747 *
748 * Parameters: parent_task parent task
749 * parent_coalitions parent's set of coalitions
750 * child_proc child process
751 * inherit_memory TRUE, if the parents address space is
752 * to be inherited by the child
753 * is_64bit_addr TRUE, if the child being created will
754 * be associated with a 64 bit address space
755 * is_64bit_data TRUE if the child being created will use a
756 * 64-bit register state
757 * in_exec TRUE, if called from execve or posix spawn set exec
758 * FALSE, if called from fork or vfexec
759 *
760 * Note: This code is called in the fork() case, from the execve() call
761 * graph, if implementing an execve() following a vfork(), from
762 * the posix_spawn() call graph (which implicitly includes a
763 * vfork() equivalent call, and in the system bootstrap case.
764 *
765 * It creates a new task and thread (and as a side effect of the
766 * thread creation, a uthread) in the parent coalition set, which is
767 * then associated with the process 'child'. If the parent
768 * process address space is to be inherited, then a flag
769 * indicates that the newly created task should inherit this from
770 * the parent task.
771 *
772 * As a special concession to bootstrapping the initial process
773 * in the system, it's possible for 'parent_task' to be TASK_NULL;
774 * in this case, 'inherit_memory' MUST be FALSE.
775 */
776 thread_t
777 fork_create_child(task_t parent_task,
778 coalition_t *parent_coalitions,
779 proc_t child_proc,
780 int inherit_memory,
781 int is_64bit_addr,
782 int is_64bit_data,
783 int in_exec)
784 {
785 thread_t child_thread = NULL;
786 task_t child_task;
787 kern_return_t result;
788
789 /* Create a new task for the child process */
790 result = task_create_internal(parent_task,
791 parent_coalitions,
792 inherit_memory,
793 is_64bit_addr,
794 is_64bit_data,
795 TF_LRETURNWAIT | TF_LRETURNWAITER, /* All created threads will wait in task_wait_to_return */
796 in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */
797 &child_task);
798 if (result != KERN_SUCCESS) {
799 printf("%s: task_create_internal failed. Code: %d\n",
800 __func__, result);
801 goto bad;
802 }
803
804 if (!in_exec) {
805 /*
806 * Set the child process task to the new task if not in exec;
807 * for the exec case, the task will be set in proc_exec_switch_task after image activation.
808 */
809 child_proc->task = child_task;
810 }
811
812 /* Set child task process to child proc */
813 set_bsdtask_info(child_task, child_proc);
814
815 /* Propagate CPU limit timer from parent */
816 if (timerisset(&child_proc->p_rlim_cpu)) {
817 task_vtimer_set(child_task, TASK_VTIMER_RLIM);
818 }
819
820 /*
821 * Set child process BSD visible scheduler priority if nice value
822 * inherited from parent
823 */
824 if (child_proc->p_nice != 0) {
825 resetpriority(child_proc);
826 }
827
828 /*
829 * Create a new thread for the child process
830 * The new thread is waiting on the event triggered by 'task_clear_return_wait'
831 */
832 result = thread_create_waiting(child_task,
833 (thread_continue_t)task_wait_to_return,
834 task_get_return_wait_event(child_task),
835 &child_thread);
836
837 if (result != KERN_SUCCESS) {
838 printf("%s: thread_create failed. Code: %d\n",
839 __func__, result);
840 task_deallocate(child_task);
841 child_task = NULL;
goto bad;
842 }
843
844 /*
845 * Tag thread as being the first thread in its task.
846 */
847 thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);
848
849 bad:
850 thread_yield_internal(1);
851
852 return child_thread;
853 }
854
855
856 /*
857 * fork
858 *
859 * Description: fork system call.
860 *
861 * Parameters: parent Parent process to fork
862 * uap (void) [unused]
863 * retval Return value
864 *
865 * Returns: 0 Success
866 * EAGAIN Resource unavailable, try again
867 *
868 * Notes: Attempts to create a new child process which inherits state
869 * from the parent process. If successful, the call returns
870 * having created an initially suspended child process with an
871 * extra Mach task and thread reference, for which the thread
872 * is initially suspended. Until we resume the child process,
873 * it is not yet running.
874 *
875 * The return information to the child is contained in the
876 * thread state structure of the new child, and does not
877 * become visible to the child through a normal return process,
878 * since it never made the call into the kernel itself in the
879 * first place.
880 *
881 * After resuming the thread, this function returns directly to
882 * the parent process which invoked the fork() system call.
883 *
884 * Important: The child thread_resume occurs before the parent returns;
885 * depending on scheduling latency, this means that it is not
886 * deterministic as to whether the parent or child is scheduled
887 * to run first. It is entirely possible that the child could
888 * run to completion prior to the parent running.
889 */
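/*
 * Illustrative userland consequence of the note above (a sketch, not part
 * of this file): because the child may be scheduled before fork() returns
 * in the parent, callers needing ordering must synchronize explicitly,
 * e.g. by waiting:
 *
 *	pid_t pid = fork();
 *	if (pid > 0) {
 *		int status;
 *		(void)waitpid(pid, &status, 0);	(wait rather than assume ordering)
 *	}
 */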
890 int
891 fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
892 {
893 thread_t child_thread;
894 int err;
895
896 retval[1] = 0; /* flag parent return for user space */
897
898 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
899 task_t child_task;
900 proc_t child_proc;
901
902 /* Return to the parent */
903 child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
904 retval[0] = child_proc->p_pid;
905
906 /*
907 * Drop the signal lock on the child which was taken on our
908 * behalf by forkproc()/cloneproc() to prevent signals being
909 * received by the child in a partially constructed state.
910 */
911 proc_signalend(child_proc, 0);
912 proc_transend(child_proc, 0);
913
914 /* flag the fork has occurred */
915 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
916 DTRACE_PROC1(create, proc_t, child_proc);
917
918 #if CONFIG_DTRACE
919 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
920 (*dtrace_proc_waitfor_hook)(child_proc);
921 }
922 #endif
923
924 /* "Return" to the child */
925 task_clear_return_wait(get_threadtask(child_thread));
926
927 /* drop the extra references we got during the creation */
928 if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
929 task_deallocate(child_task);
930 }
931 thread_deallocate(child_thread);
932 }
933
934 return err;
935 }
936
937
938 /*
939 * cloneproc
940 *
941 * Description: Create a new process from a specified process.
942 *
943 * Parameters: parent_task The parent task to be cloned, or
944 * TASK_NULL if the new task is not
945 * to inherit the task and VM
946 * characteristics of the parent
949 * parent_proc The parent process to be cloned
950 * inherit_memory True if the child is to inherit
951 * memory from the parent; if this is
952 * non-NULL, then the parent_task must
953 * also be non-NULL
954 * memstat_internal Whether to track the process in the
955 * jetsam priority list (if configured)
956 *
957 * Returns: !NULL pointer to new child thread
958 * NULL Failure (unspecified)
959 *
960 * Note: On return the newly created child process has the signal lock
961 * held to block delivery of signals to it if called with lock set.
962 * fork() code needs to explicitly remove this lock before
963 * signals can be delivered
964 *
965 * In the case of bootstrap, this function can be called from
966 * bsd_utaskbootstrap() in order to bootstrap the first process;
967 * the net effect is to provide a uthread structure for the
968 * kernel process associated with the kernel task.
969 *
970 * XXX: Tristating using the value parent_task as the major key
971 * and inherit_memory as the minor key is something we should
972 * refactor later; we owe the current semantics, ultimately,
973 * to the semantics of task_create_internal. For now, we will
974 * live with this being somewhat awkward.
975 */
976 thread_t
977 cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
978 {
979 #if !CONFIG_MEMORYSTATUS
980 #pragma unused(memstat_internal)
981 #endif
982 task_t child_task;
983 proc_t child_proc;
984 thread_t child_thread = NULL;
985
986 if ((child_proc = forkproc(parent_proc)) == NULL) {
987 /* Failed to allocate new process */
988 goto bad;
989 }
990
991 /*
992 * In the case where the parent_task is TASK_NULL (during the init path)
993 * we make the assumption that the register size will be the same as the
994 * address space size since there's no way to determine the possible
995 * register size until an image is exec'd.
996 *
997 * The only architecture that has different address space and register sizes
998 * (arm64_32) isn't being used within kernel-space, so the above assumption
999 * always holds true for the init path.
1000 */
1001 const int parent_64bit_addr = parent_proc->p_flag & P_LP64;
1002 const int parent_64bit_data = (parent_task == TASK_NULL) ? parent_64bit_addr : task_get_64bit_data(parent_task);
1003
1004 child_thread = fork_create_child(parent_task,
1005 parent_coalitions,
1006 child_proc,
1007 inherit_memory,
1008 parent_64bit_addr,
1009 parent_64bit_data,
1010 FALSE);
1011
1012 if (child_thread == NULL) {
1013 /*
1014 * Failed to create thread; now we must deconstruct the new
1015 * process previously obtained from forkproc().
1016 */
1017 forkproc_free(child_proc);
1018 goto bad;
1019 }
1020
1021 child_task = get_threadtask(child_thread);
1022 if (parent_64bit_addr) {
1023 OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
1024 } else {
1025 OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
1026 }
1027
1028 #if CONFIG_MEMORYSTATUS
1029 if (memstat_internal) {
1030 proc_list_lock();
1031 child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
1032 proc_list_unlock();
1033 }
1034 #endif
1035
1036 /* make child visible */
1037 pinsertchild(parent_proc, child_proc);
1038
1039 /*
1040 * Make child runnable, set start time.
1041 */
1042 child_proc->p_stat = SRUN;
1043 bad:
1044 return child_thread;
1045 }
1046
1047
1048 /*
1049 * Destroy a process structure that resulted from a call to forkproc(), but
1050 * which must be returned to the system because of a subsequent failure
1051 * preventing it from becoming active.
1052 *
1053 * Parameters: p The incomplete process from forkproc()
1054 *
1055 * Returns: (void)
1056 *
1057 * Note: This function should only be used in an error handler following
1058 * a call to forkproc().
1059 *
1060 * Operations occur in reverse order of those in forkproc().
1061 */
1062 void
1063 forkproc_free(proc_t p)
1064 {
1065 #if CONFIG_PERSONAS
1066 persona_proc_drop(p);
1067 #endif /* CONFIG_PERSONAS */
1068
1069 #if PSYNCH
1070 pth_proc_hashdelete(p);
1071 #endif /* PSYNCH */
1072
1073 /* We hold the signal and transition locks; drop them */
1074 proc_signalend(p, 0);
1075 proc_transend(p, 0);
1076
1077 /*
1078 * If we have our own copy of the resource limits structure, we
1079 * need to free it. If it's a shared copy, we need to drop our
1080 * reference on it.
1081 */
1082 proc_limitdrop(p, 0);
1083 p->p_limit = NULL;
1084
1085 #if SYSV_SHM
1086 /* Need to drop references to the shared memory segment(s), if any */
1087 if (p->vm_shm) {
1088 /*
1089 * Use shmexec(): we have no address space, so no mappings
1090 *
1091 * XXX Yes, the routine is badly named.
1092 */
1093 shmexec(p);
1094 }
1095 #endif
1096
1097 /* Need to undo the effects of the fdcopy(), if any */
1098 fdfree(p);
1099
1100 /*
1101 * Drop the reference on a text vnode pointer, if any
1102 * XXX This code is broken in forkproc(); see <rdar://4256419>;
1103 * XXX if anyone ever uses this field, we will be extremely unhappy.
1104 */
1105 if (p->p_textvp) {
1106 vnode_rele(p->p_textvp);
1107 p->p_textvp = NULL;
1108 }
1109
1110 /* Stop the profiling clock */
1111 stopprofclock(p);
1112
1113 /* Update the audit session proc count */
1114 AUDIT_SESSION_PROCEXIT(p);
1115
1116 lck_mtx_destroy(&p->p_mlock, proc_mlock_grp);
1117 lck_mtx_destroy(&p->p_fdmlock, proc_fdmlock_grp);
1118 lck_mtx_destroy(&p->p_ucred_mlock, proc_ucred_mlock_grp);
1119 #if CONFIG_DTRACE
1120 lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp);
1121 #endif
1122 lck_spin_destroy(&p->p_slock, proc_slock_grp);
1123
1124 /* Release the credential reference */
1125 kauth_cred_unref(&p->p_ucred);
1126
1127 proc_list_lock();
1128 /* Decrement the count of processes in the system */
1129 nprocs--;
1130
1131 /* Take it out of process hash */
1132 LIST_REMOVE(p, p_hash);
1133
1134 proc_list_unlock();
1135
1136 thread_call_free(p->p_rcall);
1137
1138 /* Free allocated memory */
1139 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
1140 p->p_sigacts = NULL;
1141 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
1142 p->p_stats = NULL;
1143
1144 proc_checkdeadrefs(p);
1145 FREE_ZONE(p, sizeof *p, M_PROC);
1146 }
1147
1148
1149 /*
1150 * forkproc
1151 *
1152 * Description: Create a new process structure, given a parent process
1153 * structure.
1154 *
1155 * Parameters: parent_proc The parent process
1156 *
1157 * Returns: !NULL The new process structure
1158 * NULL Error (insufficient free memory)
1159 *
1160 * Note: When successful, the newly created process structure is
1161 * partially initialized; if a caller needs to deconstruct the
1162 * returned structure, they must call forkproc_free() to do so.
1163 */
1164 proc_t
1165 forkproc(proc_t parent_proc)
1166 {
1167 proc_t child_proc; /* Our new process */
1168 static int nextpid = 0, pidwrap = 0;
1169 static uint64_t nextuniqueid = 0;
1170 int error = 0;
1171 struct session *sessp;
1172 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());
1173
1174 MALLOC_ZONE(child_proc, proc_t, sizeof *child_proc, M_PROC, M_WAITOK);
1175 if (child_proc == NULL) {
1176 printf("forkproc: M_PROC zone exhausted\n");
1177 goto bad;
1178 }
1179 /* zero it out as we need to insert in hash */
1180 bzero(child_proc, sizeof *child_proc);
1181
1182 MALLOC_ZONE(child_proc->p_stats, struct pstats *,
1183 sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK);
1184 if (child_proc->p_stats == NULL) {
1185 printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n");
1186 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1187 child_proc = NULL;
1188 goto bad;
1189 }
1190 MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *,
1191 sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK);
1192 if (child_proc->p_sigacts == NULL) {
1193 printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
1194 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1195 child_proc->p_stats = NULL;
1196 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1197 child_proc = NULL;
1198 goto bad;
1199 }
1200
1201 /* allocate a callout for use by interval timers */
1202 child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
1203 if (child_proc->p_rcall == NULL) {
1204 FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
1205 child_proc->p_sigacts = NULL;
1206 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1207 child_proc->p_stats = NULL;
1208 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1209 child_proc = NULL;
1210 goto bad;
1211 }
1212
1213
1214 /*
1215 * Find an unused PID.
1216 */
1217
1218 proc_list_lock();
1219
1220 nextpid++;
1221 retry:
1222 /*
1223 * If the process ID prototype has wrapped around,
1224 * restart somewhat above 0, as the low-numbered procs
1225 * tend to include daemons that don't exit.
1226 */
1227 if (nextpid >= PID_MAX) {
1228 nextpid = 100;
1229 pidwrap = 1;
1230 }
1231 if (pidwrap != 0) {
1232 /* a pid stays in the hash for both zombie and running states */
1233 if (pfind_locked(nextpid) != PROC_NULL) {
1234 nextpid++;
1235 goto retry;
1236 }
1237
1238 if (pgfind_internal(nextpid) != PGRP_NULL) {
1239 nextpid++;
1240 goto retry;
1241 }
1242 if (session_find_internal(nextpid) != SESSION_NULL) {
1243 nextpid++;
1244 goto retry;
1245 }
1246 }
1247 nprocs++;
1248 child_proc->p_pid = nextpid;
1249 child_proc->p_responsible_pid = nextpid; /* initially responsible for self */
1250 child_proc->p_idversion = OSIncrementAtomic(&nextpidversion);
1251 /* kernel process is handcrafted and not from fork, so start from 1 */
1252 child_proc->p_uniqueid = ++nextuniqueid;
1253 #if 1
1254 if (child_proc->p_pid != 0) {
1255 if (pfind_locked(child_proc->p_pid) != PROC_NULL) {
1256 panic("proc in the list already\n");
1257 }
1258 }
1259 #endif
1260 /* Insert in the hash */
1261 child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE);
1262 LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
1263 proc_list_unlock();
1264
1265 if (child_proc->p_uniqueid == startup_serial_num_procs) {
1266 /*
1267 * Turn off startup serial logging now that we have reached
1268 * the defined number of startup processes.
1269 */
1270 startup_serial_logging_active = false;
1271 }
1272
1273 /*
1274 * We've identified the PID we are going to use; initialize the new
1275 * process structure.
1276 */
1277 child_proc->p_stat = SIDL;
1278 child_proc->p_pgrpid = PGRPID_DEAD;
1279
1280 /*
1281 * The proc structure was zeroed at allocation time because it needed
1282 * to be inserted into the hash. Copy the section that is to be copied
1283 * directly from the parent.
1284 */
1285 bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
1286 (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy));
1287
1288 /*
1289 * Some flags are inherited from the parent.
1290 * Duplicate sub-structures as needed.
1291 * Increase reference counts on shared objects.
1292 * The p_stats and p_sigacts substructs are set in vm_fork.
1293 */
1294 #if !CONFIG_EMBEDDED
1295 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
1296 #else /* !CONFIG_EMBEDDED */
1297 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID));
1298 #endif /* !CONFIG_EMBEDDED */
1299 if (parent_proc->p_flag & P_PROFIL) {
1300 startprofclock(child_proc);
1301 }
1302
1303 child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK));
1304
1305 /*
1306 * Note that if the current thread has an assumed identity, this
1307 * credential will be granted to the new process.
1308 */
1309 child_proc->p_ucred = kauth_cred_get_with_ref();
1310 /* update cred on proc */
1311 PROC_UPDATE_CREDS_ONPROC(child_proc);
1312 /* update audit session proc count */
1313 AUDIT_SESSION_PROCNEW(child_proc);
1314
1315 lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr);
1316 lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
1317 lck_mtx_init(&child_proc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
1318 #if CONFIG_DTRACE
1319 lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
1320 #endif
1321 lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr);
1322
1323 klist_init(&child_proc->p_klist);
1324
1325 if (child_proc->p_textvp != NULLVP) {
1326 /* bump references to the text vnode */
1327 /* Need to hold iocount across the ref call */
1328 if (vnode_getwithref(child_proc->p_textvp) == 0) {
1329 error = vnode_ref(child_proc->p_textvp);
1330 vnode_put(child_proc->p_textvp);
1331 if (error != 0) {
1332 child_proc->p_textvp = NULLVP;
1333 }
1334 }
1335 }
1336
1337 /*
1338 * Copy the parent's per-process open file table to the child; if
1339 * there is a per-thread current working directory, set the child's
1340 * per-process current working directory to that instead of the
1341 * parent's.
1342 *
1343 * XXX may fail to copy descriptors to child
1344 */
1345 child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);
1346
1347 #if SYSV_SHM
1348 if (parent_proc->vm_shm) {
1349 /* XXX may fail to attach shm to child */
1350 (void)shmfork(parent_proc, child_proc);
1351 }
1352 #endif
1353 /*
1354 * inherit the limit structure to child
1355 */
1356 proc_limitfork(parent_proc, child_proc);
1357
1358 if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1359 uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur;
1360 child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur;
1361 }
1362
1363 /* Initialize new process stats, including start time */
1364 /* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
1365 bzero(child_proc->p_stats, sizeof(*child_proc->p_stats));
1366 microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);
1367
1368 if (parent_proc->p_sigacts != NULL) {
1369 (void)memcpy(child_proc->p_sigacts,
1370 parent_proc->p_sigacts, sizeof *child_proc->p_sigacts);
1371 } else {
1372 (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts);
1373 }
1374
1375 sessp = proc_session(parent_proc);
1376 if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT) {
1377 OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag);
1378 }
1379 session_rele(sessp);
1380
1381 /*
1382 * Block all signals from reaching the process.
1383 * No transition race should be occurring with the child yet,
1384 * but indicate that the process is in (the creation) transition.
1385 */
1386 proc_signalstart(child_proc, 0);
1387 proc_transstart(child_proc, 0, 0);
1388
1389 child_proc->p_pcaction = 0;
1390
1391 TAILQ_INIT(&child_proc->p_uthlist);
1392 TAILQ_INIT(&child_proc->p_aio_activeq);
1393 TAILQ_INIT(&child_proc->p_aio_doneq);
1394
1395 /* Inherit the parent flags for code sign */
1396 child_proc->p_csflags = (parent_proc->p_csflags & ~CS_KILLED);
1397
1398 /*
1399 * Copy work queue information
1400 *
1401 * Note: This should probably only happen in the case where we are
1402 * creating a child that is a copy of the parent; since this
1403 * routine is called in the non-duplication case of vfork()
1404 * or posix_spawn(), this information should likely not
1405 * be duplicated.
1406 *
1407 * <rdar://6640553> Work queue pointers that no longer point to code
1408 */
1409 child_proc->p_wqthread = parent_proc->p_wqthread;
1410 child_proc->p_threadstart = parent_proc->p_threadstart;
1411 child_proc->p_pthsize = parent_proc->p_pthsize;
1412 if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
1413 child_proc->p_lflag |= P_LREGISTER;
1414 }
1415 child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
1416 child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
1417 child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
1418 child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
1419 child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
1420 #if PSYNCH
1421 pth_proc_hashinit(child_proc);
1422 #endif /* PSYNCH */
1423
1424 #if CONFIG_PERSONAS
1425 child_proc->p_persona = NULL;
1426 error = persona_proc_inherit(child_proc, parent_proc);
1427 if (error != 0) {
1428 printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
1429 forkproc_free(child_proc);
1430 child_proc = NULL;
1431 goto bad;
1432 }
1433 #endif
1434
1435 #if CONFIG_MEMORYSTATUS
1436 /* Memorystatus init */
1437 child_proc->p_memstat_state = 0;
1438 child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
1439 child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
1440 child_proc->p_memstat_userdata = 0;
1441 child_proc->p_memstat_idle_start = 0;
1442 child_proc->p_memstat_idle_delta = 0;
1443 child_proc->p_memstat_memlimit = 0;
1444 child_proc->p_memstat_memlimit_active = 0;
1445 child_proc->p_memstat_memlimit_inactive = 0;
1446 #if CONFIG_FREEZE
1447 child_proc->p_memstat_freeze_sharedanon_pages = 0;
1448 #endif
1449 child_proc->p_memstat_dirty = 0;
1450 child_proc->p_memstat_idledeadline = 0;
1451 #endif /* CONFIG_MEMORYSTATUS */
1452
1453 bad:
1454 return child_proc;
1455 }
1456
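/*
 * Lock accessors for the proc structure and the global proc list (a
 * summary added for orientation; the functions below are authoritative).
 *
 * proc_lock/proc_unlock take the per-process mutex (p_mlock); the assert
 * below documents that p_mlock must not be taken while holding
 * proc_list_mlock. proc_spinlock/proc_spinunlock use the per-process
 * spinlock (p_slock), proc_list_lock/proc_list_unlock the global proc
 * list mutex, and proc_ucred_lock/proc_ucred_unlock the per-process
 * credential mutex.
 */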
1457 void
1458 proc_lock(proc_t p)
1459 {
1460 LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
1461 lck_mtx_lock(&p->p_mlock);
1462 }
1463
1464 void
1465 proc_unlock(proc_t p)
1466 {
1467 lck_mtx_unlock(&p->p_mlock);
1468 }
1469
1470 void
1471 proc_spinlock(proc_t p)
1472 {
1473 lck_spin_lock_grp(&p->p_slock, proc_slock_grp);
1474 }
1475
1476 void
1477 proc_spinunlock(proc_t p)
1478 {
1479 lck_spin_unlock(&p->p_slock);
1480 }
1481
1482 void
1483 proc_list_lock(void)
1484 {
1485 lck_mtx_lock(proc_list_mlock);
1486 }
1487
1488 void
1489 proc_list_unlock(void)
1490 {
1491 lck_mtx_unlock(proc_list_mlock);
1492 }
1493
1494 void
1495 proc_ucred_lock(proc_t p)
1496 {
1497 lck_mtx_lock(&p->p_ucred_mlock);
1498 }
1499
1500 void
1501 proc_ucred_unlock(proc_t p)
1502 {
1503 lck_mtx_unlock(&p->p_ucred_mlock);
1504 }
1505
1506 #include <kern/zalloc.h>
1507
1508 struct zone *uthread_zone = NULL;
1509
1510 static lck_grp_t *rethrottle_lock_grp;
1511 static lck_attr_t *rethrottle_lock_attr;
1512 static lck_grp_attr_t *rethrottle_lock_grp_attr;
1513
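/*
 * One-time initialization of the uthread zone and the rethrottle lock
 * group/attributes; called lazily from uthread_alloc() on first use
 * (summary added for orientation).
 */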
1514 static void
1515 uthread_zone_init(void)
1516 {
1517 assert(uthread_zone == NULL);
1518
1519 rethrottle_lock_grp_attr = lck_grp_attr_alloc_init();
1520 rethrottle_lock_grp = lck_grp_alloc_init("rethrottle", rethrottle_lock_grp_attr);
1521 rethrottle_lock_attr = lck_attr_alloc_init();
1522
1523 uthread_zone = zinit(sizeof(struct uthread),
1524 thread_max * sizeof(struct uthread),
1525 THREAD_CHUNK * sizeof(struct uthread),
1526 "uthreads");
1527 }
1528
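/*
 * uthread_alloc
 *
 * Description:	Allocate and zero a uthread for 'thread' in 'task' (summary
 *		added for orientation; the code below is authoritative).
 *		Unless 'noinherit' is set, the new uthread inherits the
 *		creating thread's credential and signal mask when both
 *		belong to the same task, and is linked onto the process's
 *		p_uthlist (except for exec copy tasks, which hold no ref on
 *		the proc).
 */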
1529 void *
1530 uthread_alloc(task_t task, thread_t thread, int noinherit)
1531 {
1532 proc_t p;
1533 uthread_t uth;
1534 uthread_t uth_parent;
1535 void *ut;
1536
1537 if (uthread_zone == NULL) {
1538 uthread_zone_init();
1539 }
1540
1541 ut = (void *)zalloc(uthread_zone);
1542 bzero(ut, sizeof(struct uthread));
1543
1544 p = (proc_t) get_bsdtask_info(task);
1545 uth = (uthread_t)ut;
1546 uth->uu_thread = thread;
1547
1548 lck_spin_init(&uth->uu_rethrottle_lock, rethrottle_lock_grp,
1549 rethrottle_lock_attr);
1550
1551 /*
1552 * Thread inherits credential from the creating thread, if both
1553 * are in the same task.
1554 *
1555 * If the creating thread has no credential or is from another
1556 * task we can leave the new thread credential NULL. If it needs
1557 * one later, it will be lazily assigned from the task's process.
1558 */
1559 uth_parent = (uthread_t)get_bsdthread_info(current_thread());
1560 if ((noinherit == 0) && task == current_task() &&
1561 uth_parent != NULL &&
1562 IS_VALID_CRED(uth_parent->uu_ucred)) {
1563 /*
1564 * XXX The new thread is, in theory, being created in context
1565 * XXX of parent thread, so a direct reference to the parent
1566 * XXX is OK.
1567 */
1568 kauth_cred_ref(uth_parent->uu_ucred);
1569 uth->uu_ucred = uth_parent->uu_ucred;
1570 /* the credential we just inherited is an assumed credential */
1571 if (uth_parent->uu_flag & UT_SETUID) {
1572 uth->uu_flag |= UT_SETUID;
1573 }
1574 } else {
1575 /* sometimes workqueue threads are created outside of task context */
1576 if ((task != kernel_task) && (p != PROC_NULL)) {
1577 uth->uu_ucred = kauth_cred_proc_ref(p);
1578 } else {
1579 uth->uu_ucred = NOCRED;
1580 }
1581 }
1582
1583
1584 if ((task != kernel_task) && p) {
1585 proc_lock(p);
1586 if (noinherit != 0) {
1587 /* workq threads will not inherit masks */
1588 uth->uu_sigmask = ~workq_threadmask;
1589 } else if (uth_parent) {
1590 if (uth_parent->uu_flag & UT_SAS_OLDMASK) {
1591 uth->uu_sigmask = uth_parent->uu_oldmask;
1592 } else {
1593 uth->uu_sigmask = uth_parent->uu_sigmask;
1594 }
1595 }
1596 uth->uu_context.vc_thread = thread;
1597 /*
1598 * Do not add the uthread to proc uthlist for exec copy task,
1599 * since it does not hold a ref on proc.
1600 */
1601 if (!task_is_exec_copy(task)) {
1602 TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
1603 }
1604 proc_unlock(p);
1605
1606 #if CONFIG_DTRACE
1607 if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
1608 uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
1609 }
1610 #endif
1611 }
1612
1613 return ut;
1614 }
1615
1616 /*
1617 * This routine frees the thread name field of the uthread_t structure. Split out of
1618 * uthread_cleanup() so thread name does not get deallocated while generating a corpse fork.
1619 */
1620 void
1621 uthread_cleanup_name(void *uthread)
1622 {
1623 uthread_t uth = (uthread_t)uthread;
1624
1625 /*
1626 * <rdar://17834538>
1627 * Set pth_name to NULL before calling free().
1628 * Previously there was a race condition in the
1629 * case this code was executing during a stackshot
1630 * where the stackshot could try and copy pth_name
1631 * after it had been freed and before it was marked
1632 * as null.
1633 */
1634 if (uth->pth_name != NULL) {
1635 void *pth_name = uth->pth_name;
1636 uth->pth_name = NULL;
1637 kfree(pth_name, MAXTHREADNAMESIZE);
1638 }
1639 return;
1640 }
1641
1642 /*
1643 * This routine frees all the BSD context in uthread except the credential.
1644 * It does not free the uthread structure itself.
1645 */
1646 void
1647 uthread_cleanup(task_t task, void *uthread, void * bsd_info)
1648 {
1649 struct _select *sel;
1650 uthread_t uth = (uthread_t)uthread;
1651 proc_t p = (proc_t)bsd_info;
1652
1653 #if PROC_REF_DEBUG
1654 if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
1655 panic("uthread_cleanup called for uthread %p with uu_proc_refcount != 0", uthread);
1656 }
1657 #endif
1658
1659 if (uth->uu_lowpri_window || uth->uu_throttle_info) {
1660 /*
1661 * task is marked as a low priority I/O type
1662 * and we've somehow managed to not dismiss the throttle
1663 * through the normal exit paths back to user space...
1664 * no need to throttle this thread since it's going away,
1665 * but we do need to update our bookkeeping w.r.t. throttled threads
1666 *
1667 * Calling this routine will clean up any throttle info reference
1668 * still in use by the thread.
1669 */
1670 throttle_lowpri_io(0);
1671 }
1672 /*
1673 * Per-thread audit state should never last beyond system
1674 * call return. Since we don't audit the thread creation/
1675 * removal, the thread state pointer should never be
1676 * non-NULL when we get here.
1677 */
1678 assert(uth->uu_ar == NULL);
1679
1680 if (uth->uu_kqr_bound) {
1681 kqueue_threadreq_unbind(p, uth->uu_kqr_bound);
1682 }
1683
1684 sel = &uth->uu_select;
1685 /* cleanup the select bit space */
1686 if (sel->nbytes) {
1687 FREE(sel->ibits, M_TEMP);
1688 FREE(sel->obits, M_TEMP);
1689 sel->nbytes = 0;
1690 }
1691
1692 if (uth->uu_cdir) {
1693 vnode_rele(uth->uu_cdir);
1694 uth->uu_cdir = NULLVP;
1695 }
1696
1697 if (uth->uu_wqset) {
1698 if (waitq_set_is_valid(uth->uu_wqset)) {
1699 waitq_set_deinit(uth->uu_wqset);
1700 }
1701 FREE(uth->uu_wqset, M_SELECT);
1702 uth->uu_wqset = NULL;
1703 uth->uu_wqstate_sz = 0;
1704 }
1705
1706 os_reason_free(uth->uu_exit_reason);
1707
1708 if ((task != kernel_task) && p) {
1709 if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
1710 vfork_exit_internal(uth->uu_proc, 0, 1);
1711 }
1712 /*
1713 * Remove the thread from the process list and
1714 * transfer [appropriate] pending signals to the process.
1715 * Do not remove the uthread from proc uthlist for exec
1716 * copy task, since it does not have a ref on proc and
1717 * would not have been added to the list.
1718 */
1719 if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
1720 proc_lock(p);
1721
1722 TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
1723 p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
1724 proc_unlock(p);
1725 }
1726 #if CONFIG_DTRACE
1727 struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
1728 uth->t_dtrace_scratch = NULL;
1729 if (tmpptr != NULL && !task_is_exec_copy(task)) {
1730 dtrace_ptss_release_entry(p, tmpptr);
1731 }
1732 #endif
1733 }
1734 }
1735
1736 /* This routine releases the credential stored in uthread */
1737 void
1738 uthread_cred_free(void *uthread)
1739 {
1740 uthread_t uth = (uthread_t)uthread;
1741
1742 /* release the credential reference held by this uthread, if any */
1743 if (IS_VALID_CRED(uth->uu_ucred)) {
1744 kauth_cred_t oldcred = uth->uu_ucred;
1745 uth->uu_ucred = NOCRED;
1746 kauth_cred_unref(&oldcred);
1747 }
1748 }
1749
1750 /* This routine frees the uthread structure held in thread structure */
1751 void
1752 uthread_zone_free(void *uthread)
1753 {
1754 uthread_t uth = (uthread_t)uthread;
1755
1756 if (uth->t_tombstone) {
1757 kfree(uth->t_tombstone, sizeof(struct doc_tombstone));
1758 uth->t_tombstone = NULL;
1759 }
1760
1761 lck_spin_destroy(&uth->uu_rethrottle_lock, rethrottle_lock_grp);
1762
1763 uthread_cleanup_name(uthread);
1764 /* and free the uthread itself */
1765 zfree(uthread_zone, uthread);
1766 }