1 /*
2 * Copyright (c) 2000-2007, 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
67 */
68 /*
69 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74 /*
75 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
76 * support for mandatory and extensible security protections. This notice
77 * is included in support of clause 2.2 (b) of the Apple Public License,
78 * Version 2.0.
79 */
80
81 #include <kern/assert.h>
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/filedesc.h>
85 #include <sys/kernel.h>
86 #include <sys/malloc.h>
87 #include <sys/proc_internal.h>
88 #include <sys/kauth.h>
89 #include <sys/user.h>
90 #include <sys/reason.h>
91 #include <sys/resourcevar.h>
92 #include <sys/vnode_internal.h>
93 #include <sys/file_internal.h>
94 #include <sys/acct.h>
95 #include <sys/codesign.h>
96 #include <sys/sysproto.h>
97 #if CONFIG_PERSONAS
98 #include <sys/persona.h>
99 #endif
100 #include <sys/doc_tombstone.h>
101 #if CONFIG_DTRACE
102 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
103 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
104 extern void dtrace_proc_fork(proc_t, proc_t, int);
105
106 /*
107 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
108 * we will store its value before actually calling it.
109 */
110 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
111
112 #include <sys/dtrace_ptss.h>
113 #endif
114
115 #include <security/audit/audit.h>
116
117 #include <mach/mach_types.h>
118 #include <kern/coalition.h>
119 #include <kern/kern_types.h>
120 #include <kern/kalloc.h>
121 #include <kern/mach_param.h>
122 #include <kern/task.h>
123 #include <kern/thread.h>
124 #include <kern/thread_call.h>
125 #include <kern/zalloc.h>
126
127 #include <os/log.h>
128 
131 #if CONFIG_MACF
132 #include <security/mac_framework.h>
133 #include <security/mac_mach_internal.h>
134 #endif
135
136 #include <vm/vm_map.h>
137 #include <vm/vm_protos.h>
138 #include <vm/vm_shared_region.h>
139
140 #include <sys/shm_internal.h> /* for shmfork() */
141 #include <mach/task.h> /* for thread_create() */
142 #include <mach/thread_act.h> /* for thread_resume() */
143
144 #include <sys/sdt.h>
145
146 #if CONFIG_MEMORYSTATUS
147 #include <sys/kern_memorystatus.h>
148 #endif
149
150 /* XXX routines which should have Mach prototypes, but don't */
151 void thread_set_parent(thread_t parent, int pid);
152 extern void act_thread_catt(void *ctx);
153 void thread_set_child(thread_t child, int pid);
154 void *act_thread_csave(void);
155 extern boolean_t task_is_exec_copy(task_t);
156
157
158 thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
159 proc_t forkproc(proc_t);
160 void forkproc_free(proc_t);
161 thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child, int inherit_memory, int is64bit, int in_exec);
162 void proc_vfork_begin(proc_t parent_proc);
163 void proc_vfork_end(proc_t parent_proc);
164
165 #define DOFORK 0x1 /* fork() system call */
166 #define DOVFORK 0x2 /* vfork() system call */
167
168 /*
169 * proc_vfork_begin
170 *
171 * Description: start a vfork on a process
172 *
173 * Parameters: parent_proc process (re)entering vfork state
174 *
175 * Returns: (void)
176 *
177 * Notes: Although this function increments a count, a count in
178 * excess of 1 is not currently supported. According to the
179 * POSIX standard, calling anything other than execve() or
180 * _exit() following a vfork(), including calling vfork()
181 * itself again, will result in undefined behaviour
182 */
183 void
184 proc_vfork_begin(proc_t parent_proc)
185 {
186 proc_lock(parent_proc);
187 parent_proc->p_lflag |= P_LVFORK;
188 parent_proc->p_vforkcnt++;
189 proc_unlock(parent_proc);
190 }
191
192 /*
193 * proc_vfork_end
194 *
195 * Description: stop a vfork on a process
196 *
197 * Parameters: parent_proc process leaving vfork state
198 *
199 * Returns: (void)
200 *
201 * Notes: Decrements the count; currently, reentrancy of vfork()
202 * is unsupported on the current process
203 */
204 void
205 proc_vfork_end(proc_t parent_proc)
206 {
207 proc_lock(parent_proc);
208 parent_proc->p_vforkcnt--;
209 if (parent_proc->p_vforkcnt < 0)
210 panic("vfork cnt is -ve");
211 if (parent_proc->p_vforkcnt == 0)
212 parent_proc->p_lflag &= ~P_LVFORK;
213 proc_unlock(parent_proc);
214 }
215
216
217 /*
218 * vfork
219 *
220 * Description: vfork system call
221 *
222 * Parameters: void [no arguments]
223 *
224 * Retval: 0 (to child process)
225 * !0 pid of child (to parent process)
226 * -1 error (see "Returns:")
227 *
228 * Returns: EAGAIN Administrative limit reached
229 * EINVAL vfork() called during vfork()
230 * ENOMEM Failed to allocate new process
231 *
232 * Note: After a successful call to this function, the parent process
233 * has its task, thread, and uthread lent to the child process,
234 * and control is returned to the caller; if this function is
235 * invoked as a system call, the return is to user space, and
236 * is effectively running on the child process.
237 *
238 * Subsequent calls that operate on process state are permitted,
239 * though discouraged, and will operate on the child process; any
240 * operations on the task, thread, or uthread will result in
241 * changes to the parent state and, if inheritable, will also
242 * affect the child state once a task, thread, and uthread are
243 * realized for the child process at execve() time. Given
244 * this, it's recommended that people use the posix_spawn()
245 * call instead.
246 *
247 * BLOCK DIAGRAM OF VFORK
248 *
249 * Before:
250 *
251 * ,----------------. ,-------------.
252 * | | task | |
253 * | parent_thread | ------> | parent_task |
254 * | | <.list. | |
255 * `----------------' `-------------'
256 * uthread | ^ bsd_info | ^
257 * v | vc_thread v | task
258 * ,----------------. ,-------------.
259 * | | | |
260 * | parent_uthread | <.list. | parent_proc | <-- current_proc()
261 * | | | |
262 * `----------------' `-------------'
263 * uu_proc |
264 * v
265 * NULL
266 *
267 * After:
268 *
269 * ,----------------. ,-------------.
270 * | | task | |
271 * ,----> | parent_thread | ------> | parent_task |
272 * | | | <.list. | |
273 * | `----------------' `-------------'
274 * | uthread | ^ bsd_info | ^
275 * | v | vc_thread v | task
276 * | ,----------------. ,-------------.
277 * | | | | |
278 * | | parent_uthread | <.list. | parent_proc |
279 * | | | | |
280 * | `----------------' `-------------'
281 * | uu_proc | . list
282 * | v v
283 * | ,----------------.
284 * `----- | |
285 * p_vforkact | child_proc | <-- current_proc()
286 * | |
287 * `----------------'
288 */
289 int
290 vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval)
291 {
292 thread_t child_thread;
293 int err;
294
295 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_VFORK, NULL)) != 0) {
296 retval[1] = 0;
297 } else {
298 uthread_t ut = get_bsdthread_info(current_thread());
299 proc_t child_proc = ut->uu_proc;
300
301 retval[0] = child_proc->p_pid;
302 retval[1] = 1; /* flag child return for user space */
303
304 /*
305 * Drop the signal lock on the child which was taken on our
306 * behalf by forkproc()/cloneproc() to prevent signals being
307 * received by the child in a partially constructed state.
308 */
309 proc_signalend(child_proc, 0);
310 proc_transend(child_proc, 0);
311
312 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
313 DTRACE_PROC1(create, proc_t, child_proc);
314 ut->uu_flag &= ~UT_VFORKING;
315 }
316
317 return (err);
318 }
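
/*
 * Illustrative example (not part of the original file): given the POSIX
 * restrictions described in the notes above, the only portable use of
 * vfork() from user space is to call execve()/_exit() immediately in the
 * "child". A minimal userland sketch of that pattern; the program path
 * and arguments are hypothetical:
 *
 *	#include <unistd.h>
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		// "child": borrowed parent task/thread/uthread
 *		execl("/bin/echo", "echo", "hello", (char *)NULL);
 *		_exit(127);		// exec failed; must not call exit()
 *	} else if (pid > 0) {
 *		// parent: resumes only after the child execs or exits
 *	}
 *
 * As the notes above recommend, posix_spawn() is the preferred interface.
 */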
319
320
321 /*
322 * fork1
323 *
324 * Description: common code used by all new process creation other than the
325 * bootstrap of the initial process on the system
326 *
327 * Parameters: parent_proc parent process of the process being created
328 * child_threadp pointer to location to receive the
329 * Mach thread_t of the child process
330 * created
331 * kind kind of creation being requested
332 * coalitions if spawn, the set of coalitions the
333 * child process should join, or NULL to
334 * inherit the parent's. On non-spawns,
335 * this param is ignored and the child
336 * always inherits the parent's
337 * coalitions.
338 *
339 * Notes: Permissible values for 'kind':
340 *
341 * PROC_CREATE_FORK Create a complete process which will
342 * return actively running in both the
343 * parent and the child; the child copies
344 * the parent address space.
345 * PROC_CREATE_SPAWN Create a complete process which will
346 * return actively running in the parent
347 * only after returning actively running
348 * in the child; the child address space
349 * is newly created by an image activator,
350 * after which the child is run.
351 * PROC_CREATE_VFORK Creates a partial process which will
352 * borrow the parent task, thread, and
353 * uthread to return running in the child;
354 * the child address space and other parts
355 * are lazily created at execve() time, or
356 * the child is terminated, and the parent
357 * does not actively run until that
358 * happens.
359 *
360 * At first it may seem strange that we return the child thread
361 * address rather than process structure, since the process is
362 * the only part guaranteed to be "new"; however, since we do
363 * not actually adjust other references between Mach and BSD (see
364 * the block diagram above the implementation of vfork()), this
365 * is the only method which guarantees us the ability to get
366 * back to the other information.
367 */
368 int
369 fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
370 {
371 thread_t parent_thread = (thread_t)current_thread();
372 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread);
373 proc_t child_proc = NULL; /* set in switch, but compiler... */
374 thread_t child_thread = NULL;
375 uid_t uid;
376 int count;
377 int err = 0;
378 int spawn = 0;
379
380 /*
381 * Although process entries are dynamically created, we still keep
382 * a global limit on the maximum number we will create. Don't allow
383 * a nonprivileged user to use the last process; don't let root
384 * exceed the limit. The variable nprocs is the current number of
385 * processes, maxproc is the limit.
386 */
387 uid = kauth_getruid();
388 proc_list_lock();
389 if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
390 #if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
391 /*
392 * On the development kernel, panic so that the fact that we hit
393 * the process limit is obvious, as this may very well wedge the
394 * system.
395 */
396 panic("The process table is full; parent pid=%d", parent_proc->p_pid);
397 #endif
398 proc_list_unlock();
399 tablefull("proc");
400 return (EAGAIN);
401 }
402 proc_list_unlock();
403
404 /*
405 * Increment the count of procs running with this uid. Don't allow
406 * a nonprivileged user to exceed their current limit, which is
407 * always less than what an rlim_t can hold.
408 * (locking protection is provided by list lock held in chgproccnt)
409 */
410 count = chgproccnt(uid, 1);
411 if (uid != 0 &&
412 (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
413 #if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
414 /*
415 * On the development kernel, panic so that the fact that we hit
416 * the per user process limit is obvious. This may be less dire
417 * than hitting the global process limit, but we cannot rely on
418 * that.
419 */
420 panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid);
421 #endif
422 err = EAGAIN;
423 goto bad;
424 }
425
426 #if CONFIG_MACF
427 /*
428 * Determine if MAC policies applied to the process will allow
429 * it to fork. This is an advisory-only check.
430 */
431 err = mac_proc_check_fork(parent_proc);
432 if (err != 0) {
433 goto bad;
434 }
435 #endif
436
437 switch(kind) {
438 case PROC_CREATE_VFORK:
439 /*
440 * Prevent a vfork while we are in vfork(); we should
441 * likely also prevent a fork here, and that check
442 * should then be outside the switch statement, since
443 * the proc struct contents will copy from the child
444 * and the task/thread/uthread from the parent in that
445 * case. We do not support vfork() in vfork() because
446 * we don't have to; the same non-requirement is true
447 * of both fork() and posix_spawn() and any call other
448 * than execve() and _exit(), but we've been
449 * historically lenient, so we continue to be so
450 * (for now).
451 *
452 * <rdar://6640521> Probably a source of random panics
453 */
454 if (parent_uthread->uu_flag & UT_VFORK) {
455 printf("fork1 called within vfork by %s\n", parent_proc->p_comm);
456 err = EINVAL;
457 goto bad;
458 }
459
460 /*
461 * Flag us in progress; if we chose to support vfork() in
462 * vfork(), we would chain our parent at this point (in
463 * effect, a stack push). We don't, since we actually want
464 * to disallow everything not specified in the standard
465 */
466 proc_vfork_begin(parent_proc);
467
468 /* The newly created process comes with signal lock held */
469 if ((child_proc = forkproc(parent_proc)) == NULL) {
470 /* Failed to allocate new process */
471 proc_vfork_end(parent_proc);
472 err = ENOMEM;
473 goto bad;
474 }
475
476 // XXX BEGIN: wants to move to be common code (and safe)
477 #if CONFIG_MACF
478 /*
479 * allow policies to associate the credential/label that
480 * we referenced from the parent ... with the child
481 * JMM - this really isn't safe, as we can drop that
482 * association without informing the policy in other
483 * situations (keep long enough to get policies changed)
484 */
485 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
486 #endif
487
488 /*
489 * Propagate change of PID - may get new cred if auditing.
490 *
491 * NOTE: This has no effect in the vfork case, since
492 * child_proc->task != current_task(), but we duplicate it
493 * because this is probably, ultimately, wrong, since we
494 * will be running in the "child" which is the parent task
495 * with the wrong token until we get to the execve() or
496 * _exit() call; a lot of "undefined" can happen before
497 * that.
498 *
499 * <rdar://6640530> disallow everything but execve()/_exit()?
500 */
501 set_security_token(child_proc);
502
503 AUDIT_ARG(pid, child_proc->p_pid);
504
505 // XXX END: wants to move to be common code (and safe)
506
507 /*
508 * BORROW PARENT TASK, THREAD, UTHREAD FOR CHILD
509 *
510 * Note: this is where we would "push" state instead of setting
511 * it for nested vfork() support (see proc_vfork_end() for
512 * a description of the issues here).
513 */
514 child_proc->task = parent_proc->task;
515
516 child_proc->p_lflag |= P_LINVFORK;
517 child_proc->p_vforkact = parent_thread;
518 child_proc->p_stat = SRUN;
519
520 /*
521 * Until UT_VFORKING is cleared at the end of the vfork
522 * syscall, the process identity of this thread is slightly
523 * murky.
524 *
525 * As long as UT_VFORK and its associated field (uu_proc)
526 * are set, current_proc() will always return the child process.
527 *
528 * However dtrace_proc_selfpid() returns the parent pid to
529 * ensure that e.g. the proc:::create probe actions accrue
530 * to the parent. (Otherwise the child magically seems to
531 * have created itself!)
532 */
533 parent_uthread->uu_flag |= UT_VFORK | UT_VFORKING;
534 parent_uthread->uu_proc = child_proc;
535 parent_uthread->uu_userstate = (void *)act_thread_csave();
536 parent_uthread->uu_vforkmask = parent_uthread->uu_sigmask;
537
538 /* temporarily drop thread-set-id state */
539 if (parent_uthread->uu_flag & UT_SETUID) {
540 parent_uthread->uu_flag |= UT_WASSETUID;
541 parent_uthread->uu_flag &= ~UT_SETUID;
542 }
543
544 /* blow thread state information */
545 /* XXX is this actually necessary, given syscall return? */
546 thread_set_child(parent_thread, child_proc->p_pid);
547
548 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
549
550 /*
551 * Preserve synchronization semantics of vfork. If
552 * waiting for child to exec or exit, set P_PPWAIT
553 * on child, and sleep on our proc (in case of exit).
554 */
555 child_proc->p_lflag |= P_LPPWAIT;
556 pinsertchild(parent_proc, child_proc); /* set visible */
557
558 break;
559
560 case PROC_CREATE_SPAWN:
561 /*
562 * A spawned process differs from a forked process in that
563 * the spawned process does not carry around the parent's
564 * baggage with regard to address space copying, dtrace,
565 * and so on.
566 */
567 spawn = 1;
568
569 /* FALLTHROUGH */
570
571 case PROC_CREATE_FORK:
572 /*
573 * When we clone the parent process, we are going to inherit
574 * its task attributes and memory, since when we fork, we
575 * will, in effect, create a duplicate of it, with only minor
576 * differences. Contrarily, spawned processes do not inherit.
577 */
578 if ((child_thread = cloneproc(parent_proc->task,
579 spawn ? coalitions : NULL,
580 parent_proc,
581 spawn ? FALSE : TRUE,
582 FALSE)) == NULL) {
583 /* Failed to create thread */
584 err = EAGAIN;
585 goto bad;
586 }
587
588 /* copy current thread state into the child thread (only for fork) */
589 if (!spawn) {
590 thread_dup(child_thread);
591 }
592
593 /* child_proc = child_thread->task->proc; */
594 child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));
595
596 // XXX BEGIN: wants to move to be common code (and safe)
597 #if CONFIG_MACF
598 /*
599 * allow policies to associate the credential/label that
600 * we referenced from the parent ... with the child
601 * JMM - this really isn't safe, as we can drop that
602 * association without informing the policy in other
603 * situations (keep long enough to get policies changed)
604 */
605 mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
606 #endif
607
608 /*
609 * Propagate change of PID - may get new cred if auditing.
610 *
611 * NOTE: This has no effect in the vfork case, since
612 * child_proc->task != current_task(), but we duplicate it
613 * because this is probably, ultimately, wrong, since we
614 * will be running in the "child" which is the parent task
615 * with the wrong token until we get to the execve() or
616 * _exit() call; a lot of "undefined" can happen before
617 * that.
618 *
619 * <rdar://6640530> disallow everything but execve()/_exit()?
620 */
621 set_security_token(child_proc);
622
623 AUDIT_ARG(pid, child_proc->p_pid);
624
625 // XXX END: wants to move to be common code (and safe)
626
627 /*
628 * Blow thread state information; this is what gives the child
629 * process its "return" value from a fork() call.
630 *
631 * Note: this should probably move to fork() proper, since it
632 * is not relevant to spawn, and the value won't matter
633 * until we resume the child there. If you are in here
634 * refactoring code, consider doing this at the same time.
635 */
636 thread_set_child(child_thread, child_proc->p_pid);
637
638 child_proc->p_acflag = AFORK; /* forked but not exec'ed */
639
640 #if CONFIG_DTRACE
641 dtrace_proc_fork(parent_proc, child_proc, spawn);
642 #endif /* CONFIG_DTRACE */
643 if (!spawn) {
644 /*
645 * Of note, we need to initialize the bank context behind
646 * the protection of the proc_trans lock to prevent a race with exit.
647 */
648 task_bank_init(get_threadtask(child_thread));
649 }
650
651 break;
652
653 default:
654 panic("fork1 called with unknown kind %d", kind);
655 break;
656 }
657
658
659 /* return the thread pointer to the caller */
660 *child_threadp = child_thread;
661
662 bad:
663 /*
664 * In the error case, we return a 0 value for the returned pid (but
665 * it is ignored in the trampoline due to the error return); this
666 * is probably not necessary.
667 */
668 if (err) {
669 (void)chgproccnt(uid, -1);
670 }
671
672 return (err);
673 }
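
/*
 * Illustrative example (not part of the original file): PROC_CREATE_FORK
 * and PROC_CREATE_VFORK are driven by fork() and vfork() in this file,
 * while PROC_CREATE_SPAWN is driven by posix_spawn() from the exec path.
 * Since the comments above steer callers toward posix_spawn(), a minimal
 * userland sketch of that interface (program path and arguments are
 * hypothetical):
 *
 *	#include <spawn.h>
 *	#include <sys/wait.h>
 *
 *	extern char **environ;
 *
 *	pid_t pid;
 *	int status;
 *	char *child_argv[] = { "echo", "hello", NULL };
 *
 *	if (posix_spawn(&pid, "/bin/echo", NULL, NULL, child_argv, environ) == 0)
 *		waitpid(pid, &status, 0);	// reap the spawned child
 */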
674
675
676 /*
677 * vfork_return
678 *
679 * Description: "Return" to the parent vfork thread following execve/_exit;
680 * this is done by reassociating the parent process structure
681 * with the task, thread, and uthread.
682 *
683 * Refer to the ASCII art above vfork() to figure out the
684 * state we're undoing.
685 *
686 * Parameters: child_proc Child process
687 * retval System call return value array
688 * rval Return value to present to parent
689 *
690 * Returns: void
691 *
692 * Notes: The caller resumes or exits the parent, as appropriate, after
693 * calling this function.
694 */
695 void
696 vfork_return(proc_t child_proc, int32_t *retval, int rval)
697 {
698 task_t parent_task = get_threadtask(child_proc->p_vforkact);
699 proc_t parent_proc = get_bsdtask_info(parent_task);
700 thread_t th = current_thread();
701 uthread_t uth = get_bsdthread_info(th);
702
703 act_thread_catt(uth->uu_userstate);
704
705 /* clear vfork state in parent proc structure */
706 proc_vfork_end(parent_proc);
707
708 /* REPATRIATE PARENT TASK, THREAD, UTHREAD */
709 uth->uu_userstate = 0;
710 uth->uu_flag &= ~UT_VFORK;
711 /* restore thread-set-id state */
712 if (uth->uu_flag & UT_WASSETUID) {
713 uth->uu_flag |= UT_SETUID;
714 uth->uu_flag &= ~UT_WASSETUID;
715 }
716 uth->uu_proc = 0;
717 uth->uu_sigmask = uth->uu_vforkmask;
718
719 proc_lock(child_proc);
720 child_proc->p_lflag &= ~P_LINVFORK;
721 child_proc->p_vforkact = 0;
722 proc_unlock(child_proc);
723
724 thread_set_parent(th, rval);
725
726 if (retval) {
727 retval[0] = rval;
728 retval[1] = 0; /* mark parent */
729 }
730 }
731
732
733 /*
734 * fork_create_child
735 *
736 * Description: Common operations associated with the creation of a child
737 * process
738 *
739 * Parameters: parent_task parent task
740 * parent_coalitions parent's set of coalitions
741 * child_proc child process
742 * inherit_memory TRUE, if the parent's address space is
743 * to be inherited by the child
744 * is64bit TRUE, if the child being created will
745 * be associated with a 64 bit process
746 * rather than a 32 bit process
747 * in_exec TRUE, if called from execve or posix spawn set exec
748 * FALSE, if called from fork or vfexec
749 *
750 * Note: This code is called in the fork() case, from the execve() call
751 * graph, if implementing an execve() following a vfork(), from
752 * the posix_spawn() call graph (which implicitly includes a
753 * vfork() equivalent call), and in the system bootstrap case.
754 *
755 * It creates a new task and thread (and as a side effect of the
756 * thread creation, a uthread) in the parent coalition set, which is
757 * then associated with the process 'child'. If the parent
758 * process address space is to be inherited, then a flag
759 * indicates that the newly created task should inherit this from
760 * the parent task.
761 *
762 * As a special concession to bootstrapping the initial process
763 * in the system, it's possible for 'parent_task' to be TASK_NULL;
764 * in this case, 'inherit_memory' MUST be FALSE.
765 */
766 thread_t
767 fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit, int in_exec)
768 {
769 thread_t child_thread = NULL;
770 task_t child_task;
771 kern_return_t result;
772
773 /* Create a new task for the child process */
774 result = task_create_internal(parent_task,
775 parent_coalitions,
776 inherit_memory,
777 is64bit,
778 TF_LRETURNWAIT | TF_LRETURNWAITER, /* All created threads will wait in task_wait_to_return */
779 in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */
780 &child_task);
781 if (result != KERN_SUCCESS) {
782 printf("%s: task_create_internal failed. Code: %d\n",
783 __func__, result);
784 goto bad;
785 }
786
787 if (!in_exec) {
788 /*
789 * Set the child process task to the new task if not in exec,
790 * will set the task for exec case in proc_exec_switch_task after image activation.
791 */
792 child_proc->task = child_task;
793 }
794
795 /* Set child task process to child proc */
796 set_bsdtask_info(child_task, child_proc);
797
798 /* Propagate CPU limit timer from parent */
799 if (timerisset(&child_proc->p_rlim_cpu))
800 task_vtimer_set(child_task, TASK_VTIMER_RLIM);
801
802 /*
803 * Set child process BSD visible scheduler priority if nice value
804 * inherited from parent
805 */
806 if (child_proc->p_nice != 0)
807 resetpriority(child_proc);
808
809 /*
810 * Create a new thread for the child process
811 * The new thread is waiting on the event triggered by 'task_clear_return_wait'
812 */
813 result = thread_create_waiting(child_task,
814 (thread_continue_t)task_wait_to_return,
815 task_get_return_wait_event(child_task),
816 &child_thread);
817
818 if (result != KERN_SUCCESS) {
819 printf("%s: thread_create failed. Code: %d\n",
820 __func__, result);
821 task_deallocate(child_task);
822 child_task = NULL;
823 goto bad;
824 }
824
825 /*
826 * Tag thread as being the first thread in its task.
827 */
828 thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);
829
830 bad:
831 thread_yield_internal(1);
832
833 return(child_thread);
834 }
835
836
837 /*
838 * fork
839 *
840 * Description: fork system call.
841 *
842 * Parameters: parent Parent process to fork
843 * uap (void) [unused]
844 * retval Return value
845 *
846 * Returns: 0 Success
847 * EAGAIN Resource unavailable, try again
848 *
849 * Notes: Attempts to create a new child process which inherits state
850 * from the parent process. If successful, the call returns
851 * having created a child process with an extra Mach task and
852 * thread reference, for which the initial thread is suspended.
853 * Until we resume the child process, it is not yet running.
855 *
856 * The return information to the child is contained in the
857 * thread state structure of the new child, and does not
858 * become visible to the child through a normal return process,
859 * since it never made the call into the kernel itself in the
860 * first place.
861 *
862 * After resuming the thread, this function returns directly to
863 * the parent process which invoked the fork() system call.
864 *
865 * Important: The child thread_resume occurs before the parent returns;
866 * depending on scheduling latency, this means that it is not
867 * deterministic as to whether the parent or child is scheduled
868 * to run first. It is entirely possible that the child could
869 * run to completion prior to the parent running.
870 */
871 int
872 fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
873 {
874 thread_t child_thread;
875 int err;
876
877 retval[1] = 0; /* flag parent return for user space */
878
879 if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
880 task_t child_task;
881 proc_t child_proc;
882
883 /* Return to the parent */
884 child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
885 retval[0] = child_proc->p_pid;
886
887 /*
888 * Drop the signal lock on the child which was taken on our
889 * behalf by forkproc()/cloneproc() to prevent signals being
890 * received by the child in a partially constructed state.
891 */
892 proc_signalend(child_proc, 0);
893 proc_transend(child_proc, 0);
894
895 /* flag the fork has occurred */
896 proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
897 DTRACE_PROC1(create, proc_t, child_proc);
898
899 #if CONFIG_DTRACE
900 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL)
901 (*dtrace_proc_waitfor_hook)(child_proc);
902 #endif
903
904 /* "Return" to the child */
905 task_clear_return_wait(get_threadtask(child_thread));
906
907 /* drop the extra references we got during the creation */
908 if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
909 task_deallocate(child_task);
910 }
911 thread_deallocate(child_thread);
912 }
913
914 return(err);
915 }
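
/*
 * Illustrative example (not part of the original file): the retval[0]
 * (child pid) and retval[1] (parent/child flag) values set up here and in
 * thread_set_child() are consumed by the user-space fork wrapper, which
 * presents them as the classic fork() return convention. A minimal
 * userland sketch of that convention:
 *
 *	#include <unistd.h>
 *	#include <sys/wait.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		// child: runs a duplicate of the parent's address space
 *		_exit(0);
 *	} else if (pid > 0) {
 *		// parent: pid is the child's process ID
 *		int status;
 *		waitpid(pid, &status, 0);
 *	} else {
 *		// failure, e.g. EAGAIN when a process limit is hit (see fork1)
 *	}
 */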
916
917
918 /*
919 * cloneproc
920 *
921 * Description: Create a new process from a specified process.
922 *
923 * Parameters: parent_task The parent task to be cloned, or
924 * TASK_NULL if the task
925 * characteristics (including the VM
926 * characteristics) are not to be
927 * inherited from the parent
928 * parent_proc The parent process to be cloned
929 * inherit_memory TRUE if the child is to inherit
930 * memory from the parent; if this is
931 * TRUE, then the parent_task must
932 * also be non-NULL
934 * memstat_internal Whether to track the process in the
935 * jetsam priority list (if configured)
936 *
937 * Returns: !NULL pointer to new child thread
938 * NULL Failure (unspecified)
939 *
940 * Note: On return, the newly created child process has the signal
941 * lock held to block delivery of signals to it if called with
942 * the lock set. fork() code needs to explicitly remove this
943 * lock before signals can be delivered.
944 *
945 * In the case of bootstrap, this function can be called from
946 * bsd_utaskbootstrap() in order to bootstrap the first process;
947 * the net effect is to provide a uthread structure for the
948 * kernel process associated with the kernel task.
949 *
950 * XXX: Tristating using the value parent_task as the major key
951 * and inherit_memory as the minor key is something we should
952 * refactor later; we owe the current semantics, ultimately,
953 * to the semantics of task_create_internal. For now, we will
954 * live with this being somewhat awkward.
955 */
956 thread_t
957 cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
958 {
959 #if !CONFIG_MEMORYSTATUS
960 #pragma unused(memstat_internal)
961 #endif
962 task_t child_task;
963 proc_t child_proc;
964 thread_t child_thread = NULL;
965
966 if ((child_proc = forkproc(parent_proc)) == NULL) {
967 /* Failed to allocate new process */
968 goto bad;
969 }
970
971 child_thread = fork_create_child(parent_task, parent_coalitions, child_proc, inherit_memory, parent_proc->p_flag & P_LP64, FALSE);
972
973 if (child_thread == NULL) {
974 /*
975 * Failed to create thread; now we must deconstruct the new
976 * process previously obtained from forkproc().
977 */
978 forkproc_free(child_proc);
979 goto bad;
980 }
981
982 child_task = get_threadtask(child_thread);
983 if (parent_proc->p_flag & P_LP64) {
984 task_set_64bit(child_task, TRUE);
985 OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
986 } else {
987 task_set_64bit(child_task, FALSE);
988 OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
989 }
990
991 #if CONFIG_MEMORYSTATUS
992 if (memstat_internal) {
993 proc_list_lock();
994 child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
995 proc_list_unlock();
996 }
997 #endif
998
999 /* make child visible */
1000 pinsertchild(parent_proc, child_proc);
1001
1002 /*
1003 * Make child runnable, set start time.
1004 */
1005 child_proc->p_stat = SRUN;
1006 bad:
1007 return(child_thread);
1008 }
1009
1010
1011 /*
1012 * Destroy a process structure that resulted from a call to forkproc(), but
1013 * which must be returned to the system because of a subsequent failure
1014 * preventing it from becoming active.
1015 *
1016 * Parameters: p The incomplete process from forkproc()
1017 *
1018 * Returns: (void)
1019 *
1020 * Note: This function should only be used in an error handler following
1021 * a call to forkproc().
1022 *
1023 * Operations occur in reverse order of those in forkproc().
1024 */
1025 void
1026 forkproc_free(proc_t p)
1027 {
1028 #if CONFIG_PERSONAS
1029 persona_proc_drop(p);
1030 #endif /* CONFIG_PERSONAS */
1031
1032 #if PSYNCH
1033 pth_proc_hashdelete(p);
1034 #endif /* PSYNCH */
1035
1036 /* We held signal and a transition locks; drop them */
1037 proc_signalend(p, 0);
1038 proc_transend(p, 0);
1039
1040 /*
1041 * If we have our own copy of the resource limits structure, we
1042 * need to free it. If it's a shared copy, we need to drop our
1043 * reference on it.
1044 */
1045 proc_limitdrop(p, 0);
1046 p->p_limit = NULL;
1047
1048 #if SYSV_SHM
1049 /* Need to drop references to the shared memory segment(s), if any */
1050 if (p->vm_shm) {
1051 /*
1052 * Use shmexec(): we have no address space, so no mappings
1053 *
1054 * XXX Yes, the routine is badly named.
1055 */
1056 shmexec(p);
1057 }
1058 #endif
1059
1060 /* Need to undo the effects of the fdcopy(), if any */
1061 fdfree(p);
1062
1063 /*
1064 * Drop the reference on a text vnode pointer, if any
1065 * XXX This code is broken in forkproc(); see <rdar://4256419>;
1066 * XXX if anyone ever uses this field, we will be extremely unhappy.
1067 */
1068 if (p->p_textvp) {
1069 vnode_rele(p->p_textvp);
1070 p->p_textvp = NULL;
1071 }
1072
1073 /* Stop the profiling clock */
1074 stopprofclock(p);
1075
1076 /* Update the audit session proc count */
1077 AUDIT_SESSION_PROCEXIT(p);
1078
1079 #if CONFIG_FINE_LOCK_GROUPS
1080 lck_mtx_destroy(&p->p_mlock, proc_mlock_grp);
1081 lck_mtx_destroy(&p->p_fdmlock, proc_fdmlock_grp);
1082 lck_mtx_destroy(&p->p_ucred_mlock, proc_ucred_mlock_grp);
1083 #if CONFIG_DTRACE
1084 lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp);
1085 #endif
1086 lck_spin_destroy(&p->p_slock, proc_slock_grp);
1087 #else /* CONFIG_FINE_LOCK_GROUPS */
1088 lck_mtx_destroy(&p->p_mlock, proc_lck_grp);
1089 lck_mtx_destroy(&p->p_fdmlock, proc_lck_grp);
1090 lck_mtx_destroy(&p->p_ucred_mlock, proc_lck_grp);
1091 #if CONFIG_DTRACE
1092 lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp);
1093 #endif
1094 lck_spin_destroy(&p->p_slock, proc_lck_grp);
1095 #endif /* CONFIG_FINE_LOCK_GROUPS */
1096
1097 /* Release the credential reference */
1098 kauth_cred_unref(&p->p_ucred);
1099
1100 proc_list_lock();
1101 /* Decrement the count of processes in the system */
1102 nprocs--;
1103
1104 /* Take it out of process hash */
1105 LIST_REMOVE(p, p_hash);
1106
1107 proc_list_unlock();
1108
1109 thread_call_free(p->p_rcall);
1110
1111 /* Free allocated memory */
1112 FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
1113 FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
1114 proc_checkdeadrefs(p);
1115 FREE_ZONE(p, sizeof *p, M_PROC);
1116 }
1117
1118
1119 /*
1120 * forkproc
1121 *
1122 * Description: Create a new process structure, given a parent process
1123 * structure.
1124 *
1125 * Parameters: parent_proc The parent process
1126 *
1127 * Returns: !NULL The new process structure
1128 * NULL Error (insufficient free memory)
1129 *
1130 * Note: When successful, the newly created process structure is
1131 * partially initialized; if a caller needs to deconstruct the
1132 * returned structure, they must call forkproc_free() to do so.
1133 */
1134 proc_t
1135 forkproc(proc_t parent_proc)
1136 {
1137 proc_t child_proc; /* Our new process */
1138 static int nextpid = 0, pidwrap = 0, nextpidversion = 0;
1139 static uint64_t nextuniqueid = 0;
1140 int error = 0;
1141 struct session *sessp;
1142 uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());
1143
1144 MALLOC_ZONE(child_proc, proc_t , sizeof *child_proc, M_PROC, M_WAITOK);
1145 if (child_proc == NULL) {
1146 printf("forkproc: M_PROC zone exhausted\n");
1147 goto bad;
1148 }
1149 /* zero it out as we need to insert in hash */
1150 bzero(child_proc, sizeof *child_proc);
1151
1152 MALLOC_ZONE(child_proc->p_stats, struct pstats *,
1153 sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK);
1154 if (child_proc->p_stats == NULL) {
1155 printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n");
1156 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1157 child_proc = NULL;
1158 goto bad;
1159 }
1160 MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *,
1161 sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK);
1162 if (child_proc->p_sigacts == NULL) {
1163 printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
1164 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1165 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1166 child_proc = NULL;
1167 goto bad;
1168 }
1169
1170 /* allocate a callout for use by interval timers */
1171 child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
1172 if (child_proc->p_rcall == NULL) {
1173 FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
1174 FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
1175 FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
1176 child_proc = NULL;
1177 goto bad;
1178 }
1179
1180
1181 /*
1182 * Find an unused PID.
1183 */
1184
1185 proc_list_lock();
1186
1187 nextpid++;
1188 retry:
1189 /*
1190 * If the process ID prototype has wrapped around,
1191 * restart somewhat above 0, as the low-numbered procs
1192 * tend to include daemons that don't exit.
1193 */
1194 if (nextpid >= PID_MAX) {
1195 nextpid = 100;
1196 pidwrap = 1;
1197 }
1198 if (pidwrap != 0) {
1199
1200 /* the pid stays in the hash both for zombie and running state */
1201 if (pfind_locked(nextpid) != PROC_NULL) {
1202 nextpid++;
1203 goto retry;
1204 }
1205
1206 if (pgfind_internal(nextpid) != PGRP_NULL) {
1207 nextpid++;
1208 goto retry;
1209 }
1210 if (session_find_internal(nextpid) != SESSION_NULL) {
1211 nextpid++;
1212 goto retry;
1213 }
1214 }
1215 nprocs++;
1216 child_proc->p_pid = nextpid;
1217 child_proc->p_responsible_pid = nextpid; /* initially responsible for self */
1218 child_proc->p_idversion = nextpidversion++;
1219 /* kernel process is handcrafted and not from fork, so start from 1 */
1220 child_proc->p_uniqueid = ++nextuniqueid;
1221 #if 1
1222 if (child_proc->p_pid != 0) {
1223 if (pfind_locked(child_proc->p_pid) != PROC_NULL)
1224 panic("proc in the list already\n");
1225 }
1226 #endif
1227 /* Insert in the hash */
1228 child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE);
1229 LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
1230 proc_list_unlock();
1231
1232 if (child_proc->p_uniqueid == startup_serial_num_procs) {
1233 /*
1234 * Turn off startup serial logging now that we have reached
1235 * the defined number of startup processes.
1236 */
1237 startup_serial_logging_active = false;
1238 }
1239
1240 /*
1241 * We've identified the PID we are going to use; initialize the new
1242 * process structure.
1243 */
1244 child_proc->p_stat = SIDL;
1245 child_proc->p_pgrpid = PGRPID_DEAD;
1246
1247 /*
1248 * The proc structure was zeroed at allocation time because it had
1249 * to be inserted into the hash early. Copy the section that is to
1250 * be copied directly from the parent.
1251 */
1252 bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
1253 (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy));
1254
1255 /*
1256 * Some flags are inherited from the parent.
1257 * Duplicate sub-structures as needed.
1258 * Increase reference counts on shared objects.
1259 * The p_stats and p_sigacts substructs are set in vm_fork.
1260 */
1261 #if !CONFIG_EMBEDDED
1262 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
1263 #else /* !CONFIG_EMBEDDED */
1264 child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID));
1265 #endif /* !CONFIG_EMBEDDED */
1266 if (parent_proc->p_flag & P_PROFIL)
1267 startprofclock(child_proc);
1268
1269 child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY));
1270
1271 /*
1272 * Note that if the current thread has an assumed identity, this
1273 * credential will be granted to the new process.
1274 */
1275 child_proc->p_ucred = kauth_cred_get_with_ref();
1276 /* update cred on proc */
1277 PROC_UPDATE_CREDS_ONPROC(child_proc);
1278 /* update audit session proc count */
1279 AUDIT_SESSION_PROCNEW(child_proc);
1280
1281 #if CONFIG_FINE_LOCK_GROUPS
1282 lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr);
1283 lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
1284 lck_mtx_init(&child_proc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
1285 #if CONFIG_DTRACE
1286 lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
1287 #endif
1288 lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr);
1289 #else /* !CONFIG_FINE_LOCK_GROUPS */
1290 lck_mtx_init(&child_proc->p_mlock, proc_lck_grp, proc_lck_attr);
1291 lck_mtx_init(&child_proc->p_fdmlock, proc_lck_grp, proc_lck_attr);
1292 lck_mtx_init(&child_proc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
1293 #if CONFIG_DTRACE
1294 lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
1295 #endif
1296 lck_spin_init(&child_proc->p_slock, proc_lck_grp, proc_lck_attr);
1297 #endif /* !CONFIG_FINE_LOCK_GROUPS */
1298 klist_init(&child_proc->p_klist);
1299
1300 if (child_proc->p_textvp != NULLVP) {
1301 /* bump references to the text vnode */
1302 /* Need to hold iocount across the ref call */
1303 if (vnode_getwithref(child_proc->p_textvp) == 0) {
1304 error = vnode_ref(child_proc->p_textvp);
1305 vnode_put(child_proc->p_textvp);
1306 if (error != 0)
1307 child_proc->p_textvp = NULLVP;
1308 }
1309 }
1310
1311 /*
1312 * Copy the parent's per-process open file table to the child; if
1313 * there is a per-thread current working directory, set the child's
1314 * per-process current working directory to that instead of the
1315 * parent's.
1316 *
1317 * XXX may fail to copy descriptors to child
1318 */
1319 child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);
1320
1321 #if SYSV_SHM
1322 if (parent_proc->vm_shm) {
1323 /* XXX may fail to attach shm to child */
1324 (void)shmfork(parent_proc, child_proc);
1325 }
1326 #endif
1327 /*
1328 * inherit the limit structure to child
1329 */
1330 proc_limitfork(parent_proc, child_proc);
1331
1332 if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1333 uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur;
1334 child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur;
1335 }
1336
1337 /* Initialize new process stats, including start time */
1338 /* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
1339 bzero(child_proc->p_stats, sizeof(*child_proc->p_stats));
1340 microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);
1341
1342 if (parent_proc->p_sigacts != NULL)
1343 (void)memcpy(child_proc->p_sigacts,
1344 parent_proc->p_sigacts, sizeof *child_proc->p_sigacts);
1345 else
1346 (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts);
1347
1348 sessp = proc_session(parent_proc);
1349 if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT)
1350 OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag);
1351 session_rele(sessp);
1352
1353 /*
1354 * Block all signals from reaching the process.
1355 * No transition race should be occurring with the child yet,
1356 * but indicate that the process is in (the creation) transition.
1357 */
1358 proc_signalstart(child_proc, 0);
1359 proc_transstart(child_proc, 0, 0);
1360
1361 child_proc->p_pcaction = 0;
1362
1363 TAILQ_INIT(&child_proc->p_uthlist);
1364 TAILQ_INIT(&child_proc->p_aio_activeq);
1365 TAILQ_INIT(&child_proc->p_aio_doneq);
1366
1367 /* Inherit the parent flags for code sign */
1368 child_proc->p_csflags = (parent_proc->p_csflags & ~CS_KILLED);
1369
1370 /*
1371 * Copy work queue information
1372 *
1373 * Note: This should probably only happen in the case where we are
1374 * creating a child that is a copy of the parent; since this
1375 * routine is called in the non-duplication case of vfork()
1376 * or posix_spawn(), then this information should likely not
1377 * be duplicated.
1378 *
1379 * <rdar://6640553> Work queue pointers that no longer point to code
1380 */
1381 child_proc->p_wqthread = parent_proc->p_wqthread;
1382 child_proc->p_threadstart = parent_proc->p_threadstart;
1383 child_proc->p_pthsize = parent_proc->p_pthsize;
1384 if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
1385 child_proc->p_lflag |= P_LREGISTER;
1386 }
1387 child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
1388 child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
1389 child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
1390 child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
1391 child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
1392 #if PSYNCH
1393 pth_proc_hashinit(child_proc);
1394 #endif /* PSYNCH */
1395
1396 #if CONFIG_PERSONAS
1397 child_proc->p_persona = NULL;
1398 error = persona_proc_inherit(child_proc, parent_proc);
1399 if (error != 0) {
1400 printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
1401 forkproc_free(child_proc);
1402 child_proc = NULL;
1403 goto bad;
1404 }
1405 #endif
1406
1407 #if CONFIG_MEMORYSTATUS
1408 /* Memorystatus init */
1409 child_proc->p_memstat_state = 0;
1410 child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
1411 child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
1412 child_proc->p_memstat_userdata = 0;
1413 child_proc->p_memstat_idle_start = 0;
1414 child_proc->p_memstat_idle_delta = 0;
1415 child_proc->p_memstat_memlimit = 0;
1416 child_proc->p_memstat_memlimit_active = 0;
1417 child_proc->p_memstat_memlimit_inactive = 0;
1418 #if CONFIG_FREEZE
1419 child_proc->p_memstat_suspendedfootprint = 0;
1420 #endif
1421 child_proc->p_memstat_dirty = 0;
1422 child_proc->p_memstat_idledeadline = 0;
1423 #endif /* CONFIG_MEMORYSTATUS */
1424
1425 bad:
1426 return(child_proc);
1427 }
1428
1429 void
1430 proc_lock(proc_t p)
1431 {
1432 LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
1433 lck_mtx_lock(&p->p_mlock);
1434 }
1435
1436 void
1437 proc_unlock(proc_t p)
1438 {
1439 lck_mtx_unlock(&p->p_mlock);
1440 }
1441
1442 void
1443 proc_spinlock(proc_t p)
1444 {
1445 lck_spin_lock(&p->p_slock);
1446 }
1447
1448 void
1449 proc_spinunlock(proc_t p)
1450 {
1451 lck_spin_unlock(&p->p_slock);
1452 }
1453
1454 void
1455 proc_list_lock(void)
1456 {
1457 lck_mtx_lock(proc_list_mlock);
1458 }
1459
1460 void
1461 proc_list_unlock(void)
1462 {
1463 lck_mtx_unlock(proc_list_mlock);
1464 }
1465
1466 void
1467 proc_ucred_lock(proc_t p)
1468 {
1469 lck_mtx_lock(&p->p_ucred_mlock);
1470 }
1471
1472 void
1473 proc_ucred_unlock(proc_t p)
1474 {
1475 lck_mtx_unlock(&p->p_ucred_mlock);
1476 }
1477
1478 #include <kern/zalloc.h>
1479
1480 struct zone *uthread_zone = NULL;
1481
1482 static lck_grp_t *rethrottle_lock_grp;
1483 static lck_attr_t *rethrottle_lock_attr;
1484 static lck_grp_attr_t *rethrottle_lock_grp_attr;
1485
1486 static void
1487 uthread_zone_init(void)
1488 {
1489 assert(uthread_zone == NULL);
1490
1491 rethrottle_lock_grp_attr = lck_grp_attr_alloc_init();
1492 rethrottle_lock_grp = lck_grp_alloc_init("rethrottle", rethrottle_lock_grp_attr);
1493 rethrottle_lock_attr = lck_attr_alloc_init();
1494
1495 uthread_zone = zinit(sizeof(struct uthread),
1496 thread_max * sizeof(struct uthread),
1497 THREAD_CHUNK * sizeof(struct uthread),
1498 "uthreads");
1499 }
1500
1501 void *
1502 uthread_alloc(task_t task, thread_t thread, int noinherit)
1503 {
1504 proc_t p;
1505 uthread_t uth;
1506 uthread_t uth_parent;
1507 void *ut;
1508
1509 if (uthread_zone == NULL)
1510 uthread_zone_init();
1511
1512 ut = (void *)zalloc(uthread_zone);
1513 bzero(ut, sizeof(struct uthread));
1514
1515 p = (proc_t) get_bsdtask_info(task);
1516 uth = (uthread_t)ut;
1517 uth->uu_thread = thread;
1518
1519 lck_spin_init(&uth->uu_rethrottle_lock, rethrottle_lock_grp,
1520 rethrottle_lock_attr);
1521
1522 /*
1523 * Thread inherits credential from the creating thread, if both
1524 * are in the same task.
1525 *
1526 * If the creating thread has no credential or is from another
1527 * task we can leave the new thread credential NULL. If it needs
1528 * one later, it will be lazily assigned from the task's process.
1529 */
1530 uth_parent = (uthread_t)get_bsdthread_info(current_thread());
1531 if ((noinherit == 0) && task == current_task() &&
1532 uth_parent != NULL &&
1533 IS_VALID_CRED(uth_parent->uu_ucred)) {
1534 /*
1535 * XXX The new thread is, in theory, being created in context
1536 * XXX of parent thread, so a direct reference to the parent
1537 * XXX is OK.
1538 */
1539 kauth_cred_ref(uth_parent->uu_ucred);
1540 uth->uu_ucred = uth_parent->uu_ucred;
1541 /* the credential we just inherited is an assumed credential */
1542 if (uth_parent->uu_flag & UT_SETUID)
1543 uth->uu_flag |= UT_SETUID;
1544 } else {
1545 /* sometimes workqueue threads are created outside of task context */
1546 if ((task != kernel_task) && (p != PROC_NULL))
1547 uth->uu_ucred = kauth_cred_proc_ref(p);
1548 else
1549 uth->uu_ucred = NOCRED;
1550 }
1551
1552
1553 if ((task != kernel_task) && p) {
1554
1555 proc_lock(p);
1556 if (noinherit != 0) {
1557 /* workq threads will not inherit masks */
1558 uth->uu_sigmask = ~workq_threadmask;
1559 } else if (uth_parent) {
1560 if (uth_parent->uu_flag & UT_SAS_OLDMASK)
1561 uth->uu_sigmask = uth_parent->uu_oldmask;
1562 else
1563 uth->uu_sigmask = uth_parent->uu_sigmask;
1564 }
1565 uth->uu_context.vc_thread = thread;
1566 /*
1567 * Do not add the uthread to proc uthlist for exec copy task,
1568 * since it does not hold a ref on proc.
1569 */
1570 if (!task_is_exec_copy(task)) {
1571 TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
1572 }
1573 proc_unlock(p);
1574
1575 #if CONFIG_DTRACE
1576 if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
1577 uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
1578 }
1579 #endif
1580 }
1581
1582 return (ut);
1583 }
1584
1585 /*
1586 * This routine frees the thread name field of the uthread_t structure. Split out of
1587 * uthread_cleanup() so thread name does not get deallocated while generating a corpse fork.
1588 */
1589 void
1590 uthread_cleanup_name(void *uthread)
1591 {
1592 uthread_t uth = (uthread_t)uthread;
1593
1594 /*
1595 * <rdar://17834538>
1596 * Set pth_name to NULL before calling free().
1597 * Previously there was a race condition in the
1598 * case this code was executing during a stackshot
1599 * where the stackshot could try and copy pth_name
1600 * after it had been freed and before it was marked
1601 * as null.
1602 */
1603 if (uth->pth_name != NULL) {
1604 void *pth_name = uth->pth_name;
1605 uth->pth_name = NULL;
1606 kfree(pth_name, MAXTHREADNAMESIZE);
1607 }
1608 return;
1609 }
1610
1611 /*
1612 * This routine frees all the BSD context in uthread except the credential.
1613 * It does not free the uthread structure itself.
1614 */
1615 void
1616 uthread_cleanup(task_t task, void *uthread, void * bsd_info)
1617 {
1618 struct _select *sel;
1619 uthread_t uth = (uthread_t)uthread;
1620 proc_t p = (proc_t)bsd_info;
1621
1622 #if PROC_REF_DEBUG
1623 if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
1624 panic("uthread_cleanup called for uthread %p with uu_proc_refcount != 0", uthread);
1625 }
1626 #endif
1627
1628 if (uth->uu_lowpri_window || uth->uu_throttle_info) {
1629 /*
1630 * task is marked as a low priority I/O type
1631 * and we've somehow managed to not dismiss the throttle
1632 * through the normal exit paths back to user space...
1633 * no need to throttle this thread since it's going away,
1634 * but we do need to update our bookkeeping w.r.t. throttled threads.
1635 *
1636 * Calling this routine will clean up any throttle info reference
1637 * still in use by the thread.
1638 */
1639 throttle_lowpri_io(0);
1640 }
1641 /*
1642 * Per-thread audit state should never last beyond system
1643 * call return. Since we don't audit the thread creation/
1644 * removal, the thread state pointer should never be
1645 * non-NULL when we get here.
1646 */
1647 assert(uth->uu_ar == NULL);
1648
1649 if (uth->uu_kqueue_bound) {
1650 kevent_qos_internal_unbind(p,
1651 0, /* didn't save qos_class */
1652 uth->uu_thread,
1653 uth->uu_kqueue_flags);
1654 assert(uth->uu_kqueue_override_is_sync == 0);
1655 }
1656
1657 sel = &uth->uu_select;
1658 /* cleanup the select bit space */
1659 if (sel->nbytes) {
1660 FREE(sel->ibits, M_TEMP);
1661 FREE(sel->obits, M_TEMP);
1662 sel->nbytes = 0;
1663 }
1664
1665 if (uth->uu_cdir) {
1666 vnode_rele(uth->uu_cdir);
1667 uth->uu_cdir = NULLVP;
1668 }
1669
1670 if (uth->uu_wqset) {
1671 if (waitq_set_is_valid(uth->uu_wqset))
1672 waitq_set_deinit(uth->uu_wqset);
1673 FREE(uth->uu_wqset, M_SELECT);
1674 uth->uu_wqset = NULL;
1675 uth->uu_wqstate_sz = 0;
1676 }
1677
1678 os_reason_free(uth->uu_exit_reason);
1679
1680 if ((task != kernel_task) && p) {
1681
1682 if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
1683 vfork_exit_internal(uth->uu_proc, 0, 1);
1684 }
1685 /*
1686 * Remove the thread from the process list and
1687 * transfer [appropriate] pending signals to the process.
1688 * Do not remove the uthread from proc uthlist for exec
1689 * copy task, since it does not hold a ref on proc and
1690 * would not have been added to the list.
1691 */
1692 if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
1693 proc_lock(p);
1694
1695 TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
1696 p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
1697 proc_unlock(p);
1698 }
1699 #if CONFIG_DTRACE
1700 struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
1701 uth->t_dtrace_scratch = NULL;
1702 if (tmpptr != NULL && !task_is_exec_copy(task)) {
1703 dtrace_ptss_release_entry(p, tmpptr);
1704 }
1705 #endif
1706 }
1707 }
1708
1709 /* This routine releases the credential stored in uthread */
1710 void
1711 uthread_cred_free(void *uthread)
1712 {
1713 uthread_t uth = (uthread_t)uthread;
1714
1715 /* drop the credential reference, if any */
1716 if (IS_VALID_CRED(uth->uu_ucred)) {
1717 kauth_cred_t oldcred = uth->uu_ucred;
1718 uth->uu_ucred = NOCRED;
1719 kauth_cred_unref(&oldcred);
1720 }
1721 }
1722
1723 /* This routine frees the uthread structure held in thread structure */
1724 void
1725 uthread_zone_free(void *uthread)
1726 {
1727 uthread_t uth = (uthread_t)uthread;
1728
1729 if (uth->t_tombstone) {
1730 kfree(uth->t_tombstone, sizeof(struct doc_tombstone));
1731 uth->t_tombstone = NULL;
1732 }
1733
1734 lck_spin_destroy(&uth->uu_rethrottle_lock, rethrottle_lock_grp);
1735
1736 uthread_cleanup_name(uthread);
1737 /* and free the uthread itself */
1738 zfree(uthread_zone, uthread);
1739 }