1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Mach Operating System
31 * Copyright (c) 1987 Carnegie-Mellon University
32 * All rights reserved. The CMU software License Agreement specifies
33 * the terms and conditions for use and redistribution.
34 */
35
36 /*-
37 * Copyright (c) 1982, 1986, 1991, 1993
38 * The Regents of the University of California. All rights reserved.
39 * (c) UNIX System Laboratories, Inc.
40 * All or some portions of this file are derived from material licensed
41 * to the University of California by American Telephone and Telegraph
42 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
43 * the permission of UNIX System Laboratories, Inc.
44 *
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
47 * are met:
48 * 1. Redistributions of source code must retain the above copyright
49 * notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 * 3. All advertising materials mentioning features or use of this software
54 * must display the following acknowledgement:
55 * This product includes software developed by the University of
56 * California, Berkeley and its contributors.
57 * 4. Neither the name of the University nor the names of its contributors
58 * may be used to endorse or promote products derived from this software
59 * without specific prior written permission.
60 *
61 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * SUCH DAMAGE.
72 *
73 * from: @(#)kern_exec.c 8.1 (Berkeley) 6/10/93
74 */
75 /*
76 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
77 * support for mandatory and extensible security protections. This notice
78 * is included in support of clause 2.2 (b) of the Apple Public License,
79 * Version 2.0.
80 */
81 #include <machine/reg.h>
82 #include <machine/cpu_capabilities.h>
83
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/filedesc.h>
87 #include <sys/kernel.h>
88 #include <sys/proc_internal.h>
89 #include <sys/kauth.h>
90 #include <sys/user.h>
91 #include <sys/socketvar.h>
92 #include <sys/malloc.h>
93 #include <sys/namei.h>
94 #include <sys/mount_internal.h>
95 #include <sys/vnode_internal.h>
96 #include <sys/file_internal.h>
97 #include <sys/stat.h>
98 #include <sys/uio_internal.h>
99 #include <sys/acct.h>
100 #include <sys/exec.h>
101 #include <sys/kdebug.h>
102 #include <sys/signal.h>
103 #include <sys/aio_kern.h>
104 #include <sys/sysproto.h>
105 #include <sys/persona.h>
106 #include <sys/reason.h>
107 #if SYSV_SHM
108 #include <sys/shm_internal.h> /* shmexec() */
109 #endif
110 #include <sys/ubc_internal.h> /* ubc_map() */
111 #include <sys/spawn.h>
112 #include <sys/spawn_internal.h>
113 #include <sys/process_policy.h>
114 #include <sys/codesign.h>
115 #include <sys/random.h>
116 #include <crypto/sha1.h>
117
118 #include <libkern/libkern.h>
119
120 #include <security/audit/audit.h>
121
122 #include <ipc/ipc_types.h>
123
124 #include <mach/mach_param.h>
125 #include <mach/mach_types.h>
126 #include <mach/port.h>
127 #include <mach/task.h>
128 #include <mach/task_access.h>
129 #include <mach/thread_act.h>
130 #include <mach/vm_map.h>
131 #include <mach/mach_vm.h>
132 #include <mach/vm_param.h>
133
134 #include <kern/sched_prim.h> /* thread_wakeup() */
135 #include <kern/affinity.h>
136 #include <kern/assert.h>
137 #include <kern/task.h>
138 #include <kern/coalition.h>
139 #include <kern/policy_internal.h>
140 #include <kern/kalloc.h>
141
142 #include <os/log.h>
143
144 #if CONFIG_MACF
145 #include <security/mac_framework.h>
146 #include <security/mac_mach_internal.h>
147 #endif
148
149 #if CONFIG_ARCADE
150 #include <kern/arcade.h>
151 #endif
152
153 #include <vm/vm_map.h>
154 #include <vm/vm_kern.h>
155 #include <vm/vm_protos.h>
157 #include <vm/vm_fault.h>
158 #include <vm/vm_pageout.h>
159
160 #include <kdp/kdp_dyld.h>
161
162 #include <machine/machine_routines.h>
163 #include <machine/pal_routines.h>
164
165 #include <pexpert/pexpert.h>
166
167 #if CONFIG_MEMORYSTATUS
168 #include <sys/kern_memorystatus.h>
169 #endif
170
171 #include <IOKit/IOBSD.h>
172
173 extern boolean_t vm_darkwake_mode;
174
175 extern int bootarg_execfailurereports; /* bsd_init.c */
176
177 #if CONFIG_DTRACE
178 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
179 extern void dtrace_proc_exec(proc_t);
180 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
181
182 /*
183  * Since dtrace_proc_waitfor_exec_ptr can be set or cleared in dtrace_subr.c,
184  * we snapshot its value before actually calling it.
185 */
186 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
187
188 #include <sys/dtrace_ptss.h>
189 #endif
190
191 /* support for child creation in exec after vfork */
192 thread_t fork_create_child(task_t parent_task,
193 coalition_t *parent_coalition,
194 proc_t child_proc,
195 int inherit_memory,
196 int is_64bit_addr,
197 int is_64bit_data,
198 int in_exec);
199 void vfork_exit(proc_t p, int rv);
200 extern void proc_apply_task_networkbg_internal(proc_t, thread_t);
201 extern void task_set_did_exec_flag(task_t task);
202 extern void task_clear_exec_copy_flag(task_t task);
203 proc_t proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread);
204 boolean_t task_is_active(task_t);
205 boolean_t thread_is_active(thread_t thread);
206 void thread_copy_resource_info(thread_t dst_thread, thread_t src_thread);
207 void *ipc_importance_exec_switch_task(task_t old_task, task_t new_task);
208 extern void ipc_importance_release(void *elem);
209 extern boolean_t task_has_watchports(task_t task);
210
211 /*
212 * Mach things for which prototypes are unavailable from Mach headers
213 */
214 #define IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND 0x1
215 void ipc_task_reset(
216 task_t task);
217 void ipc_thread_reset(
218 thread_t thread);
219 kern_return_t ipc_object_copyin(
220 ipc_space_t space,
221 mach_port_name_t name,
222 mach_msg_type_name_t msgt_name,
223 ipc_object_t *objectp,
224 mach_port_context_t context,
225 mach_msg_guard_flags_t *guard_flags,
226 uint32_t kmsg_flags);
227 void ipc_port_release_send(ipc_port_t);
228
229 #if DEVELOPMENT || DEBUG
230 void task_importance_update_owner_info(task_t);
231 #endif
232
233 extern struct savearea *get_user_regs(thread_t);
234
235 __attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid);
236
237 #include <kern/thread.h>
238 #include <kern/task.h>
239 #include <kern/ast.h>
240 #include <kern/mach_loader.h>
241 #include <kern/mach_fat.h>
242 #include <mach-o/fat.h>
243 #include <mach-o/loader.h>
244 #include <machine/vmparam.h>
245 #include <sys/imgact.h>
246
247 #include <sys/sdt.h>
248
249
250 /*
251 * EAI_ITERLIMIT The maximum number of times to iterate an image
252 * activator in exec_activate_image() before treating
253 * it as malformed/corrupt.
254 */
255 #define EAI_ITERLIMIT 3
256
257 /*
258 * For #! interpreter parsing
259 */
260 #define IS_WHITESPACE(ch) ((ch == ' ') || (ch == '\t'))
261 #define IS_EOL(ch) ((ch == '#') || (ch == '\n'))
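/*
 * Editorial note (not part of the original source): IS_EOL() treats '#'
 * as a comment character, i.e. a logical end of line.  For example, in
 *
 *     #! /bin/sh -x   # turn on tracing
 *
 * the scan in exec_shell_imgact() stops at the second '#', so only
 * "/bin/sh -x" is taken as the interpreter and its arguments.
 */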
262
263 extern vm_map_t bsd_pageable_map;
264 extern const struct fileops vnops;
265 extern int nextpidversion;
266
267 #define USER_ADDR_ALIGN(addr, val) \
268 ( ( (user_addr_t)(addr) + (val) - 1) \
269 & ~((val) - 1) )
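/*
 * Editorial example (not part of the original source): 'val' must be a
 * power of two.  USER_ADDR_ALIGN(0x1003, 8) evaluates to
 * (0x1003 + 7) & ~7 == 0x1008, rounding the address up to the next
 * 8-byte boundary.
 */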
270
271 /* Platform Code Exec Logging */
272 static int platform_exec_logging = 0;
273
274 SYSCTL_DECL(_security_mac);
275
276 SYSCTL_INT(_security_mac, OID_AUTO, platform_exec_logging, CTLFLAG_RW, &platform_exec_logging, 0,
277 "log cdhashes for all platform binary executions");
278
279 static os_log_t peLog = OS_LOG_DEFAULT;
280
281 struct exec_port_actions {
282 uint32_t portwatch_count;
283 uint32_t registered_count;
284 ipc_port_t *portwatch_array;
285 ipc_port_t *registered_array;
286 };
287
288 struct image_params; /* Forward */
289 static int exec_activate_image(struct image_params *imgp);
290 static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp);
291 static int load_return_to_errno(load_return_t lrtn);
292 static int execargs_alloc(struct image_params *imgp);
293 static int execargs_free(struct image_params *imgp);
294 static int exec_check_permissions(struct image_params *imgp);
295 static int exec_extract_strings(struct image_params *imgp);
296 static int exec_add_apple_strings(struct image_params *imgp, const load_result_t *load_result);
297 static int exec_handle_sugid(struct image_params *imgp);
298 static int sugid_scripts = 0;
299 SYSCTL_INT(_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW | CTLFLAG_LOCKED, &sugid_scripts, 0, "");
300 static kern_return_t create_unix_stack(vm_map_t map, load_result_t* load_result, proc_t p);
301 static int copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size);
302 static void exec_resettextvp(proc_t, struct image_params *);
303 static int check_for_signature(proc_t, struct image_params *);
304 static void exec_prefault_data(proc_t, struct image_params *, load_result_t *);
305 static errno_t exec_handle_port_actions(struct image_params *imgp,
306 struct exec_port_actions *port_actions);
307 static errno_t exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp,
308 uint64_t psa_darwin_role, struct exec_port_actions *port_actions);
309 static void exec_port_actions_destroy(struct exec_port_actions *port_actions);
310
311 /*
312 * exec_add_user_string
313 *
314 * Add the requested string to the string space area.
315 *
316  * Parameters:	struct image_params *	image parameter block
317 * user_addr_t string to add to strings area
318 * int segment from which string comes
319 * boolean_t TRUE if string contributes to NCARGS
320 *
321 * Returns: 0 Success
322 * !0 Failure errno from copyinstr()
323 *
324 * Implicit returns:
325 * (imgp->ip_strendp) updated location of next add, if any
326 * (imgp->ip_strspace) updated byte count of space remaining
327 * (imgp->ip_argspace) updated byte count of space in NCARGS
328 */
329 static int
330 exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolean_t is_ncargs)
331 {
332 int error = 0;
333
334 do {
335 size_t len = 0;
336 int space;
337
338 if (is_ncargs) {
339 space = imgp->ip_argspace; /* by definition smaller than ip_strspace */
340 } else {
341 space = imgp->ip_strspace;
342 }
343
344 if (space <= 0) {
345 error = E2BIG;
346 break;
347 }
348
349 if (!UIO_SEG_IS_USER_SPACE(seg)) {
350 char *kstr = CAST_DOWN(char *, str); /* SAFE */
351 error = copystr(kstr, imgp->ip_strendp, space, &len);
352 } else {
353 error = copyinstr(str, imgp->ip_strendp, space, &len);
354 }
355
356 imgp->ip_strendp += len;
357 imgp->ip_strspace -= len;
358 if (is_ncargs) {
359 imgp->ip_argspace -= len;
360 }
361 } while (error == ENAMETOOLONG);
362
363 return error;
364 }
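/*
 * Editorial note (not part of the original source): the do/while above
 * relies on copyinstr()/copystr() returning ENAMETOOLONG when no NUL is
 * found within 'space' bytes while still reporting the partial copy in
 * 'len'.  The cursors advance past the partial chunk and the loop
 * retries; if the string never terminates, the space accounting reaches
 * zero and the routine fails with E2BIG.
 */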
365
366 /*
367 * dyld is now passed the executable path as a getenv-like variable
368 * in the same fashion as the stack_guard and malloc_entropy keys.
369 */
370 #define EXECUTABLE_KEY "executable_path="
371
372 /*
373 * exec_save_path
374 *
375  * To support new app package launching for Mac OS X, dyld needs the
376 * first argument to execve() stored on the user stack.
377 *
378 * Save the executable path name at the bottom of the strings area and set
379 * the argument vector pointer to the location following that to indicate
380 * the start of the argument and environment tuples, setting the remaining
381 * string space count to the size of the string area minus the path length.
382 *
383  * Parameters:	struct image_params *	image parameter block
384 * char * path used to invoke program
385 * int segment from which path comes
386 *
387 * Returns: int 0 Success
388 * EFAULT Bad address
389 * copy[in]str:EFAULT Bad address
390 * copy[in]str:ENAMETOOLONG Filename too long
391 *
392 * Implicit returns:
393 * (imgp->ip_strings) saved path
394 * (imgp->ip_strspace) space remaining in ip_strings
395 * (imgp->ip_strendp) start of remaining copy area
396 * (imgp->ip_argspace) space remaining of NCARGS
397 * (imgp->ip_applec) Initial applev[0]
398 *
399  * Note:	We have to do this before the initial namei() since, if the
400  *		path contains symbolic links, namei() will overwrite the
401 * original path buffer contents. If the last symbolic link
402 * resolved was a relative pathname, we would lose the original
403 * "path", which could be an absolute pathname. This might be
404 * unacceptable for dyld.
405 */
406 static int
407 exec_save_path(struct image_params *imgp, user_addr_t path, int seg, const char **excpath)
408 {
409 int error;
410 size_t len;
411 char *kpath;
412
413 // imgp->ip_strings can come out of a cache, so we need to obliterate the
414 // old path.
415 memset(imgp->ip_strings, '\0', strlen(EXECUTABLE_KEY) + MAXPATHLEN);
416
417 len = MIN(MAXPATHLEN, imgp->ip_strspace);
418
419 switch (seg) {
420 case UIO_USERSPACE32:
421 case UIO_USERSPACE64: /* Same for copyin()... */
422 error = copyinstr(path, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
423 break;
424 case UIO_SYSSPACE:
425 kpath = CAST_DOWN(char *, path); /* SAFE */
426 error = copystr(kpath, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
427 break;
428 default:
429 error = EFAULT;
430 break;
431 }
432
433 if (!error) {
434 bcopy(EXECUTABLE_KEY, imgp->ip_strings, strlen(EXECUTABLE_KEY));
435 len += strlen(EXECUTABLE_KEY);
436
437 imgp->ip_strendp += len;
438 imgp->ip_strspace -= len;
439
440 if (excpath) {
441 *excpath = imgp->ip_strings + strlen(EXECUTABLE_KEY);
442 }
443 }
444
445 return error;
446 }
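/*
 * Illustrative layout (editorial, not part of the original source): for
 * an execve() of "/bin/ls", the bottom of the strings area ends up as
 *
 *     ip_strings: "executable_path=/bin/ls\0"
 *                                  ^-- *excpath
 *
 * with ip_strendp pointing just past the NUL, where the argument and
 * environment tuples will be appended.
 */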
447
448 /*
449 * exec_reset_save_path
450 *
451 * If we detect a shell script, we need to reset the string area
452 * state so that the interpreter can be saved onto the stack.
453 *
454  * Parameters:	struct image_params *	image parameter block
455 *
456 * Returns: int 0 Success
457 *
458 * Implicit returns:
459 * (imgp->ip_strings) saved path
460 * (imgp->ip_strspace) space remaining in ip_strings
461 * (imgp->ip_strendp) start of remaining copy area
462 * (imgp->ip_argspace) space remaining of NCARGS
463 *
464 */
465 static int
466 exec_reset_save_path(struct image_params *imgp)
467 {
468 imgp->ip_strendp = imgp->ip_strings;
469 imgp->ip_argspace = NCARGS;
470 imgp->ip_strspace = (NCARGS + PAGE_SIZE);
471
472 return 0;
473 }
474
475 /*
476 * exec_shell_imgact
477 *
478 * Image activator for interpreter scripts. If the image begins with
479 * the characters "#!", then it is an interpreter script. Verify the
480 * length of the script line indicating the interpreter is not in
481 * excess of the maximum allowed size. If this is the case, then
482 * break out the arguments, if any, which are separated by white
483 * space, and copy them into the argument save area as if they were
484 * provided on the command line before all other arguments. The line
485 * ends when we encounter a comment character ('#') or newline.
486 *
487  * Parameters:	struct image_params *	image parameter block
488 *
489 * Returns: -1 not an interpreter (keep looking)
490 * -3 Success: interpreter: relookup
491 * >0 Failure: interpreter: error number
492 *
493 * A return value other than -1 indicates subsequent image activators should
494 * not be given the opportunity to attempt to activate the image.
495 */
496 static int
497 exec_shell_imgact(struct image_params *imgp)
498 {
499 char *vdata = imgp->ip_vdata;
500 char *ihp;
501 char *line_startp, *line_endp;
502 char *interp;
503
504 /*
505 * Make sure it's a shell script. If we've already redirected
506 * from an interpreted file once, don't do it again.
507 */
508 if (vdata[0] != '#' ||
509 vdata[1] != '!' ||
510 (imgp->ip_flags & IMGPF_INTERPRET) != 0) {
511 return -1;
512 }
513
514 if (imgp->ip_origcputype != 0) {
515 /* Fat header previously matched, don't allow shell script inside */
516 return -1;
517 }
518
519 imgp->ip_flags |= IMGPF_INTERPRET;
520 imgp->ip_interp_sugid_fd = -1;
521 imgp->ip_interp_buffer[0] = '\0';
522
523 /* Check to see if SUGID scripts are permitted. If they aren't then
524 * clear the SUGID bits.
525 * imgp->ip_vattr is known to be valid.
526 */
527 if (sugid_scripts == 0) {
528 imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID);
529 }
530
531 /* Try to find the first non-whitespace character */
532 for (ihp = &vdata[2]; ihp < &vdata[IMG_SHSIZE]; ihp++) {
533 if (IS_EOL(*ihp)) {
534 /* Did not find interpreter, "#!\n" */
535 return ENOEXEC;
536 } else if (IS_WHITESPACE(*ihp)) {
537 /* Whitespace, like "#! /bin/sh\n", keep going. */
538 } else {
539 /* Found start of interpreter */
540 break;
541 }
542 }
543
544 if (ihp == &vdata[IMG_SHSIZE]) {
545 /* All whitespace, like "#! " */
546 return ENOEXEC;
547 }
548
549 line_startp = ihp;
550
551 /* Try to find the end of the interpreter+args string */
552 for (; ihp < &vdata[IMG_SHSIZE]; ihp++) {
553 if (IS_EOL(*ihp)) {
554 /* Got it */
555 break;
556 } else {
557 /* Still part of interpreter or args */
558 }
559 }
560
561 if (ihp == &vdata[IMG_SHSIZE]) {
562 /* A long line, like "#! blah blah blah" without end */
563 return ENOEXEC;
564 }
565
566 /* Backtrack until we find the last non-whitespace */
567 while (IS_EOL(*ihp) || IS_WHITESPACE(*ihp)) {
568 ihp--;
569 }
570
571 /* The character after the last non-whitespace is our logical end of line */
572 line_endp = ihp + 1;
573
574 /*
575 * Now we have pointers to the usable part of:
576 *
577 * "#! /usr/bin/int first second third \n"
578 * ^ line_startp ^ line_endp
579 */
580
581 /* copy the interpreter name */
582 interp = imgp->ip_interp_buffer;
583 for (ihp = line_startp; (ihp < line_endp) && !IS_WHITESPACE(*ihp); ihp++) {
584 *interp++ = *ihp;
585 }
586 *interp = '\0';
587
588 exec_reset_save_path(imgp);
589 exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_buffer),
590 UIO_SYSSPACE, NULL);
591
592 /* Copy the entire interpreter + args for later processing into argv[] */
593 interp = imgp->ip_interp_buffer;
594 for (ihp = line_startp; (ihp < line_endp); ihp++) {
595 *interp++ = *ihp;
596 }
597 *interp = '\0';
598
599 #if !SECURE_KERNEL
600 /*
601 * If we have an SUID or SGID script, create a file descriptor
602 * from the vnode and pass /dev/fd/%d instead of the actual
603 * path name so that the script does not get opened twice
604 */
605 if (imgp->ip_origvattr->va_mode & (VSUID | VSGID)) {
606 proc_t p;
607 struct fileproc *fp;
608 int fd;
609 int error;
610
611 p = vfs_context_proc(imgp->ip_vfs_context);
612 error = falloc(p, &fp, &fd, imgp->ip_vfs_context);
613 if (error) {
614 return error;
615 }
616
617 fp->f_fglob->fg_flag = FREAD;
618 fp->f_fglob->fg_ops = &vnops;
619 fp->f_fglob->fg_data = (caddr_t)imgp->ip_vp;
620
621 proc_fdlock(p);
622 procfdtbl_releasefd(p, fd, NULL);
623 fp_drop(p, fd, fp, 1);
624 proc_fdunlock(p);
625 vnode_ref(imgp->ip_vp);
626
627 imgp->ip_interp_sugid_fd = fd;
628 }
629 #endif
630
631 return -3;
632 }
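/*
 * Illustrative example (editorial, not part of the original source):
 * for a script beginning "#! /bin/sh -x\n", this activator leaves the
 * saved path set to "/bin/sh" and ip_interp_buffer holding
 * "/bin/sh -x"; when the strings are later extracted into argv[], the
 * interpreter and its argument precede the script path (or the
 * "/dev/fd/%d" alias for set-id scripts) and the original arguments.
 */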
633
634
635
636 /*
637 * exec_fat_imgact
638 *
639 * Image activator for fat 1.0 binaries. If the binary is fat, then we
640 * need to select an image from it internally, and make that the image
641 * we are going to attempt to execute. At present, this consists of
642 * reloading the first page for the image with a first page from the
643 * offset location indicated by the fat header.
644 *
645  * Parameters:	struct image_params *	image parameter block
646 *
647 * Returns: -1 not a fat binary (keep looking)
648 * -2 Success: encapsulated binary: reread
649 * >0 Failure: error number
650 *
651 * Important: This image activator is byte order neutral.
652 *
653 * Note: A return value other than -1 indicates subsequent image
654 * activators should not be given the opportunity to attempt
655 * to activate the image.
656 *
657 * If we find an encapsulated binary, we make no assertions
658 * about its validity; instead, we leave that up to a rescan
659 * for an activator to claim it, and, if it is claimed by one,
660 * that activator is responsible for determining validity.
661 */
662 static int
663 exec_fat_imgact(struct image_params *imgp)
664 {
665 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
666 kauth_cred_t cred = kauth_cred_proc_ref(p);
667 struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata;
668 struct _posix_spawnattr *psa = NULL;
669 struct fat_arch fat_arch;
670 int resid, error;
671 load_return_t lret;
672
673 if (imgp->ip_origcputype != 0) {
674 /* Fat header previously matched, don't allow another fat file inside */
675 error = -1; /* not claimed */
676 goto bad;
677 }
678
679 /* Make sure it's a fat binary */
680 if (OSSwapBigToHostInt32(fat_header->magic) != FAT_MAGIC) {
681 error = -1; /* not claimed */
682 goto bad;
683 }
684
685 	/* imgp->ip_vdata holds PAGE_SIZE bytes, zero-filled if the file is smaller */
686 lret = fatfile_validate_fatarches((vm_offset_t)fat_header, PAGE_SIZE);
687 if (lret != LOAD_SUCCESS) {
688 error = load_return_to_errno(lret);
689 goto bad;
690 }
691
692 /* If posix_spawn binprefs exist, respect those prefs. */
693 psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
694 if (psa != NULL && psa->psa_binprefs[0] != 0) {
695 uint32_t pr = 0;
696
697 /* Check each preference listed against all arches in header */
698 for (pr = 0; pr < NBINPREFS; pr++) {
699 cpu_type_t pref = psa->psa_binprefs[pr];
700 if (pref == 0) {
701 /* No suitable arch in the pref list */
702 error = EBADARCH;
703 goto bad;
704 }
705
706 if (pref == CPU_TYPE_ANY) {
707 /* Fall through to regular grading */
708 goto regular_grading;
709 }
710
711 lret = fatfile_getbestarch_for_cputype(pref,
712 (vm_offset_t)fat_header,
713 PAGE_SIZE,
714 imgp,
715 &fat_arch);
716 if (lret == LOAD_SUCCESS) {
717 goto use_arch;
718 }
719 }
720
721 /* Requested binary preference was not honored */
722 error = EBADEXEC;
723 goto bad;
724 }
725
726 regular_grading:
727 /* Look up our preferred architecture in the fat file. */
728 lret = fatfile_getbestarch((vm_offset_t)fat_header,
729 PAGE_SIZE,
730 imgp,
731 &fat_arch);
732 if (lret != LOAD_SUCCESS) {
733 error = load_return_to_errno(lret);
734 goto bad;
735 }
736
737 use_arch:
738 /* Read the Mach-O header out of fat_arch */
739 error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata,
740 PAGE_SIZE, fat_arch.offset,
741 UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
742 cred, &resid, p);
743 if (error) {
744 goto bad;
745 }
746
747 if (resid) {
748 memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
749 }
750
751 /* Success. Indicate we have identified an encapsulated binary */
752 error = -2;
753 imgp->ip_arch_offset = (user_size_t)fat_arch.offset;
754 imgp->ip_arch_size = (user_size_t)fat_arch.size;
755 imgp->ip_origcputype = fat_arch.cputype;
756 imgp->ip_origcpusubtype = fat_arch.cpusubtype;
757
758 bad:
759 kauth_cred_unref(&cred);
760 return error;
761 }
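/*
 * Editorial sketch (not part of the original source) of the layout this
 * activator walks, per <mach-o/fat.h>; all fields are big-endian:
 *
 *     struct fat_header { uint32_t magic;        // FAT_MAGIC
 *                         uint32_t nfat_arch; }; // number of fat_arch
 *     struct fat_arch   { cpu_type_t    cputype;
 *                         cpu_subtype_t cpusubtype;
 *                         uint32_t      offset;  // of the thin Mach-O
 *                         uint32_t      size;
 *                         uint32_t      align; };
 */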
762
763 static int
764 activate_exec_state(task_t task, proc_t p, thread_t thread, load_result_t *result)
765 {
766 int ret;
767
768 task_set_dyld_info(task, MACH_VM_MIN_ADDRESS, 0);
769 task_set_64bit(task, result->is_64bit_addr, result->is_64bit_data);
770 if (result->is_64bit_addr) {
771 OSBitOrAtomic(P_LP64, &p->p_flag);
772 } else {
773 OSBitAndAtomic(~((uint32_t)P_LP64), &p->p_flag);
774 }
775 task_set_mach_header_address(task, result->mach_header);
776
777 ret = thread_state_initialize(thread);
778 if (ret != KERN_SUCCESS) {
779 return ret;
780 }
781
782 if (result->threadstate) {
783 uint32_t *ts = result->threadstate;
784 uint32_t total_size = result->threadstate_sz;
785
786 while (total_size > 0) {
787 uint32_t flavor = *ts++;
788 uint32_t size = *ts++;
789
790 ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
791 if (ret) {
792 return ret;
793 }
794 ts += size;
795 total_size -= (size + 2) * sizeof(uint32_t);
796 }
797 }
798
799 thread_setentrypoint(thread, result->entry_point);
800
801 return KERN_SUCCESS;
802 }
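/*
 * Editorial note (not part of the original source): result->threadstate
 * is a packed sequence of entries, each encoded in uint32_t units as
 *
 *     { flavor, count, state[count] }
 *
 * which is why the walk above advances 'ts' by 'size' words and charges
 * (size + 2) * sizeof(uint32_t) bytes against total_size per entry.
 */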
803
804
805 /*
806 * Set p->p_comm and p->p_name to the name passed to exec
807 */
808 static void
809 set_proc_name(struct image_params *imgp, proc_t p)
810 {
811 int p_name_len = sizeof(p->p_name) - 1;
812
813 if (imgp->ip_ndp->ni_cnd.cn_namelen > p_name_len) {
814 imgp->ip_ndp->ni_cnd.cn_namelen = p_name_len;
815 }
816
817 bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_name,
818 (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
819 p->p_name[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
820
821 if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN) {
822 imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
823 }
824
825 bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
826 (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
827 p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
828 }
829
830 /*
831 * exec_mach_imgact
832 *
833 * Image activator for mach-o 1.0 binaries.
834 *
835  * Parameters:	struct image_params *	image parameter block
836 *
837  * Returns:	-1			not a Mach-O binary (keep looking)
838 * -2 Success: encapsulated binary: reread
839 * >0 Failure: error number
840 * EBADARCH Mach-o binary, but with an unrecognized
841 * architecture
842  *		ENOMEM			No memory for child process;
843  *					can only happen after vfork()
844 *
845 * Important: This image activator is NOT byte order neutral.
846 *
847 * Note: A return value other than -1 indicates subsequent image
848 * activators should not be given the opportunity to attempt
849 * to activate the image.
850 *
851 * TODO: More gracefully handle failures after vfork
852 */
853 static int
854 exec_mach_imgact(struct image_params *imgp)
855 {
856 struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
857 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
858 int error = 0;
859 task_t task;
860 task_t new_task = NULL; /* protected by vfexec */
861 thread_t thread;
862 struct uthread *uthread;
863 vm_map_t old_map = VM_MAP_NULL;
864 vm_map_t map = VM_MAP_NULL;
865 load_return_t lret;
866 load_result_t load_result = {};
867 struct _posix_spawnattr *psa = NULL;
868 int spawn = (imgp->ip_flags & IMGPF_SPAWN);
869 int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
870 int exec = (imgp->ip_flags & IMGPF_EXEC);
871 os_reason_t exec_failure_reason = OS_REASON_NULL;
872
873 /*
874 * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
875 * is a reserved field on the end, so for the most part, we can
876 * treat them as if they were identical. Reverse-endian Mach-O
877 * binaries are recognized but not compatible.
878 */
879 if ((mach_header->magic == MH_CIGAM) ||
880 (mach_header->magic == MH_CIGAM_64)) {
881 error = EBADARCH;
882 goto bad;
883 }
884
885 if ((mach_header->magic != MH_MAGIC) &&
886 (mach_header->magic != MH_MAGIC_64)) {
887 error = -1;
888 goto bad;
889 }
890
891 if (mach_header->filetype != MH_EXECUTE) {
892 error = -1;
893 goto bad;
894 }
895
896 if (imgp->ip_origcputype != 0) {
897 /* Fat header previously had an idea about this thin file */
898 if (imgp->ip_origcputype != mach_header->cputype ||
899 imgp->ip_origcpusubtype != mach_header->cpusubtype) {
900 error = EBADARCH;
901 goto bad;
902 }
903 } else {
904 imgp->ip_origcputype = mach_header->cputype;
905 imgp->ip_origcpusubtype = mach_header->cpusubtype;
906 }
907
908 task = current_task();
909 thread = current_thread();
910 uthread = get_bsdthread_info(thread);
911
912 if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64) {
913 imgp->ip_flags |= IMGPF_IS_64BIT_ADDR | IMGPF_IS_64BIT_DATA;
914 }
915
916 /* If posix_spawn binprefs exist, respect those prefs. */
917 psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
918 if (psa != NULL && psa->psa_binprefs[0] != 0) {
919 int pr = 0;
920 for (pr = 0; pr < NBINPREFS; pr++) {
921 cpu_type_t pref = psa->psa_binprefs[pr];
922 if (pref == 0) {
923 /* No suitable arch in the pref list */
924 error = EBADARCH;
925 goto bad;
926 }
927
928 if (pref == CPU_TYPE_ANY) {
929 /* Jump to regular grading */
930 goto grade;
931 }
932
933 if (pref == imgp->ip_origcputype) {
934 /* We have a match! */
935 goto grade;
936 }
937 }
938 error = EBADARCH;
939 goto bad;
940 }
941 grade:
942 if (!grade_binary(imgp->ip_origcputype, imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK, TRUE)) {
943 error = EBADARCH;
944 goto bad;
945 }
946
947 if (validate_potential_simulator_binary(imgp->ip_origcputype, imgp,
948 imgp->ip_arch_offset, imgp->ip_arch_size) != LOAD_SUCCESS) {
949 #if __x86_64__
950 const char *excpath;
951 error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath);
952 os_log_error(OS_LOG_DEFAULT, "Unsupported 32-bit executable: \"%s\"", (error) ? imgp->ip_vp->v_name : excpath);
953 #endif
954 error = EBADARCH;
955 goto bad;
956 }
957
958 #if defined(HAS_APPLE_PAC)
959 	assert(mach_header->cputype == CPU_TYPE_ARM64);
961
962 	if ((mach_header->cputype == CPU_TYPE_ARM64 &&
963 	    (mach_header->cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E) &&
964 	    CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(mach_header->cpusubtype) == 0) {
965 imgp->ip_flags &= ~IMGPF_NOJOP;
966 } else {
967 imgp->ip_flags |= IMGPF_NOJOP;
968 }
969 #endif
970
971 /* Copy in arguments/environment from the old process */
972 error = exec_extract_strings(imgp);
973 if (error) {
974 goto bad;
975 }
976
977 AUDIT_ARG(argv, imgp->ip_startargv, imgp->ip_argc,
978 imgp->ip_endargv - imgp->ip_startargv);
979 AUDIT_ARG(envv, imgp->ip_endargv, imgp->ip_envc,
980 imgp->ip_endenvv - imgp->ip_endargv);
981
982 /*
983 * We are being called to activate an image subsequent to a vfork()
984 * operation; in this case, we know that our task, thread, and
985 * uthread are actually those of our parent, and our proc, which we
986 * obtained indirectly from the image_params vfs_context_t, is the
987 * new child process.
988 */
989 if (vfexec) {
990 imgp->ip_new_thread = fork_create_child(task,
991 NULL,
992 p,
993 FALSE,
994 (imgp->ip_flags & IMGPF_IS_64BIT_ADDR),
995 (imgp->ip_flags & IMGPF_IS_64BIT_DATA),
996 FALSE);
997 /* task and thread ref returned, will be released in __mac_execve */
998 if (imgp->ip_new_thread == NULL) {
999 error = ENOMEM;
1000 goto bad;
1001 }
1002 }
1003
1004
1005 /* reset local idea of thread, uthread, task */
1006 thread = imgp->ip_new_thread;
1007 uthread = get_bsdthread_info(thread);
1008 task = new_task = get_threadtask(thread);
1009
1010 /*
1011 * Load the Mach-O file.
1012 *
1013 * NOTE: An error after this point indicates we have potentially
1014 * destroyed or overwritten some process state while attempting an
1015 * execve() following a vfork(), which is an unrecoverable condition.
1016 * We send the new process an immediate SIGKILL to avoid it executing
1017 * any instructions in the mutated address space. For true spawns,
1018 * this is not the case, and "too late" is still not too late to
1019 * return an error code to the parent process.
1020 */
1021
1022 /*
1023 * Actually load the image file we previously decided to load.
1024 */
1025 lret = load_machfile(imgp, mach_header, thread, &map, &load_result);
1026 if (lret != LOAD_SUCCESS) {
1027 error = load_return_to_errno(lret);
1028
1029 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1030 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO, 0, 0);
1031 if (lret == LOAD_BADMACHO_UPX) {
1032 set_proc_name(imgp, p);
1033 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_UPX);
1034 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1035 } else {
1036 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);
1037
1038 if (bootarg_execfailurereports) {
1039 set_proc_name(imgp, p);
1040 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1041 }
1042 }
1043
1044 exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;
1045
1046 goto badtoolate;
1047 }
1048
1049 proc_lock(p);
1050 p->p_cputype = imgp->ip_origcputype;
1051 p->p_cpusubtype = imgp->ip_origcpusubtype;
1052 p->p_platform = load_result.ip_platform;
1053 p->p_sdk = load_result.lr_sdk;
1054 proc_unlock(p);
1055
1056 vm_map_set_user_wire_limit(map, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
1057
1058 /*
1059 * Set code-signing flags if this binary is signed, or if parent has
1060 * requested them on exec.
1061 */
1062 if (load_result.csflags & CS_VALID) {
1063 imgp->ip_csflags |= load_result.csflags &
1064 (CS_VALID | CS_SIGNED | CS_DEV_CODE |
1065 CS_HARD | CS_KILL | CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV |
1066 CS_FORCED_LV | CS_ENTITLEMENTS_VALIDATED | CS_DYLD_PLATFORM | CS_RUNTIME |
1067 CS_ENTITLEMENT_FLAGS |
1068 CS_EXEC_SET_HARD | CS_EXEC_SET_KILL | CS_EXEC_SET_ENFORCEMENT);
1069 } else {
1070 imgp->ip_csflags &= ~CS_VALID;
1071 }
1072
1073 if (p->p_csflags & CS_EXEC_SET_HARD) {
1074 imgp->ip_csflags |= CS_HARD;
1075 }
1076 if (p->p_csflags & CS_EXEC_SET_KILL) {
1077 imgp->ip_csflags |= CS_KILL;
1078 }
1079 if (p->p_csflags & CS_EXEC_SET_ENFORCEMENT) {
1080 imgp->ip_csflags |= CS_ENFORCEMENT;
1081 }
1082 if (p->p_csflags & CS_EXEC_INHERIT_SIP) {
1083 if (p->p_csflags & CS_INSTALLER) {
1084 imgp->ip_csflags |= CS_INSTALLER;
1085 }
1086 if (p->p_csflags & CS_DATAVAULT_CONTROLLER) {
1087 imgp->ip_csflags |= CS_DATAVAULT_CONTROLLER;
1088 }
1089 if (p->p_csflags & CS_NVRAM_UNRESTRICTED) {
1090 imgp->ip_csflags |= CS_NVRAM_UNRESTRICTED;
1091 }
1092 }
1093
1094 /*
1095 * Set up the system reserved areas in the new address space.
1096 */
1097 	int cpu_subtype = 0; /* all cpu_subtypes use the same shared region */
1099 #if defined(HAS_APPLE_PAC)
1100 if (cpu_type() == CPU_TYPE_ARM64 &&
1101 (p->p_cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E) {
1102 assertf(p->p_cputype == CPU_TYPE_ARM64,
1103 "p %p cpu_type() 0x%x p->p_cputype 0x%x p->p_cpusubtype 0x%x",
1104 p, cpu_type(), p->p_cputype, p->p_cpusubtype);
1105 /*
1106 * arm64e uses pointer authentication, so request a separate
1107 * shared region for this CPU subtype.
1108 */
1109 cpu_subtype = p->p_cpusubtype & ~CPU_SUBTYPE_MASK;
1110 }
1111 #endif /* HAS_APPLE_PAC */
1112 vm_map_exec(map, task, load_result.is_64bit_addr, (void *)p->p_fd->fd_rdir, cpu_type(), cpu_subtype);
1113
1114 /*
1115 * Close file descriptors which specify close-on-exec.
1116 */
1117 fdexec(p, psa != NULL ? psa->psa_flags : 0, exec);
1118
1119 /*
1120 * deal with set[ug]id.
1121 */
1122 error = exec_handle_sugid(imgp);
1123 if (error) {
1124 vm_map_deallocate(map);
1125
1126 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1127 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE, 0, 0);
1128
1129 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE);
1130 if (bootarg_execfailurereports) {
1131 set_proc_name(imgp, p);
1132 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1133 }
1134
1135 goto badtoolate;
1136 }
1137
1138 /*
1139 * Commit to new map.
1140 *
1141 	 * Swap the new map in for the old one on the target task; this
1142 	 * consumes our new map reference but leaves us responsible for
1143 	 * the old_map reference. That lets us get off the pmap associated
1144 	 * with it, and then we can release it.
1145 *
1146 * The map needs to be set on the target task which is different
1147 * than current task, thus swap_task_map is used instead of
1148 * vm_map_switch.
1149 */
1150 old_map = swap_task_map(task, thread, map);
1151 vm_map_deallocate(old_map);
1152 old_map = NULL;
1153
1154 lret = activate_exec_state(task, p, thread, &load_result);
1155 if (lret != KERN_SUCCESS) {
1156 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1157 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE, 0, 0);
1158
1159 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE);
1160 if (bootarg_execfailurereports) {
1161 set_proc_name(imgp, p);
1162 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1163 }
1164
1165 goto badtoolate;
1166 }
1167
1168 /*
1169 * deal with voucher on exec-calling thread.
1170 */
1171 if (imgp->ip_new_thread == NULL) {
1172 thread_set_mach_voucher(current_thread(), IPC_VOUCHER_NULL);
1173 }
1174
1175 	/* Make sure we won't interrupt ourselves by signalling a partial process */
1176 if (!vfexec && !spawn && (p->p_lflag & P_LTRACED)) {
1177 psignal(p, SIGTRAP);
1178 }
1179
1180 if (load_result.unixproc &&
1181 create_unix_stack(get_task_map(task),
1182 &load_result,
1183 p) != KERN_SUCCESS) {
1184 error = load_return_to_errno(LOAD_NOSPACE);
1185
1186 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1187 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC, 0, 0);
1188
1189 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC);
1190 if (bootarg_execfailurereports) {
1191 set_proc_name(imgp, p);
1192 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1193 }
1194
1195 goto badtoolate;
1196 }
1197
1198 error = exec_add_apple_strings(imgp, &load_result);
1199 if (error) {
1200 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1201 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT, 0, 0);
1202
1203 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT);
1204 if (bootarg_execfailurereports) {
1205 set_proc_name(imgp, p);
1206 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1207 }
1208 goto badtoolate;
1209 }
1210
1211 /* Switch to target task's map to copy out strings */
1212 old_map = vm_map_switch(get_task_map(task));
1213
1214 if (load_result.unixproc) {
1215 user_addr_t ap;
1216
1217 /*
1218 * Copy the strings area out into the new process address
1219 * space.
1220 */
1221 ap = p->user_stack;
1222 error = exec_copyout_strings(imgp, &ap);
1223 if (error) {
1224 vm_map_switch(old_map);
1225
1226 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1227 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS, 0, 0);
1228
1229 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS);
1230 if (bootarg_execfailurereports) {
1231 set_proc_name(imgp, p);
1232 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1233 }
1234 goto badtoolate;
1235 }
1236 /* Set the stack */
1237 thread_setuserstack(thread, ap);
1238 }
1239
1240 if (load_result.dynlinker) {
1241 uint64_t ap;
1242 int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
1243
1244 /* Adjust the stack */
1245 ap = thread_adjuserstack(thread, -new_ptr_size);
1246 error = copyoutptr(load_result.mach_header, ap, new_ptr_size);
1247
1248 if (error) {
1249 vm_map_switch(old_map);
1250
1251 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1252 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER, 0, 0);
1253
1254 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER);
1255 if (bootarg_execfailurereports) {
1256 set_proc_name(imgp, p);
1257 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1258 }
1259 goto badtoolate;
1260 }
1261 task_set_dyld_info(task, load_result.all_image_info_addr,
1262 load_result.all_image_info_size);
1263 }
1264
1265 /* Avoid immediate VM faults back into kernel */
1266 exec_prefault_data(p, imgp, &load_result);
1267
1268 vm_map_switch(old_map);
1269
1270 /*
1271 * Reset signal state.
1272 */
1273 execsigs(p, thread);
1274
1275 /*
1276 * need to cancel async IO requests that can be cancelled and wait for those
1277 * already active. MAY BLOCK!
1278 */
1279 _aio_exec( p );
1280
1281 #if SYSV_SHM
1282 /* FIXME: Till vmspace inherit is fixed: */
1283 if (!vfexec && p->vm_shm) {
1284 shmexec(p);
1285 }
1286 #endif
1287 #if SYSV_SEM
1288 /* Clean up the semaphores */
1289 semexit(p);
1290 #endif
1291
1292 /*
1293 * Remember file name for accounting.
1294 */
1295 p->p_acflag &= ~AFORK;
1296
1297 set_proc_name(imgp, p);
1298
1299 #if CONFIG_SECLUDED_MEMORY
1300 if (secluded_for_apps &&
1301 load_result.platform_binary) {
1302 if (strncmp(p->p_name,
1303 "Camera",
1304 sizeof(p->p_name)) == 0) {
1305 task_set_could_use_secluded_mem(task, TRUE);
1306 } else {
1307 task_set_could_use_secluded_mem(task, FALSE);
1308 }
1309 if (strncmp(p->p_name,
1310 "mediaserverd",
1311 sizeof(p->p_name)) == 0) {
1312 task_set_could_also_use_secluded_mem(task, TRUE);
1313 }
1314 }
1315 #endif /* CONFIG_SECLUDED_MEMORY */
1316
1317 #if __arm64__
1318 if (load_result.legacy_footprint) {
1319 task_set_legacy_footprint(task);
1320 }
1321 #endif /* __arm64__ */
1322
1323 pal_dbg_set_task_name(task);
1324
1325 /*
1326 * The load result will have already been munged by AMFI to include the
1327 * platform binary flag if boot-args dictated it (AMFI will mark anything
1328 * that doesn't go through the upcall path as a platform binary if its
1329 * enforcement is disabled).
1330 */
1331 if (load_result.platform_binary) {
1332 if (cs_debug) {
1333 printf("setting platform binary on task: pid = %d\n", p->p_pid);
1334 }
1335
1336 /*
1337 * We must use 'task' here because the proc's task has not yet been
1338 * switched to the new one.
1339 */
1340 task_set_platform_binary(task, TRUE);
1341 } else {
1342 if (cs_debug) {
1343 printf("clearing platform binary on task: pid = %d\n", p->p_pid);
1344 }
1345
1346 task_set_platform_binary(task, FALSE);
1347 }
1348
1349 #if DEVELOPMENT || DEBUG
1350 /*
1351 	 * Update the pid and proc name for importance base, if any
1352 */
1353 task_importance_update_owner_info(task);
1354 #endif
1355
1356 memcpy(&p->p_uuid[0], &load_result.uuid[0], sizeof(p->p_uuid));
1357
1358 #if CONFIG_DTRACE
1359 dtrace_proc_exec(p);
1360 #endif
1361
1362 if (kdebug_enable) {
1363 long args[4] = {};
1364
1365 uintptr_t fsid = 0, fileid = 0;
1366 if (imgp->ip_vattr) {
1367 uint64_t fsid64 = vnode_get_va_fsid(imgp->ip_vattr);
1368 fsid = fsid64;
1369 fileid = imgp->ip_vattr->va_fileid;
1370 // check for (unexpected) overflow and trace zero in that case
1371 if (fsid != fsid64 || fileid != imgp->ip_vattr->va_fileid) {
1372 fsid = fileid = 0;
1373 }
1374 }
1375 KERNEL_DEBUG_CONSTANT_IST1(TRACE_DATA_EXEC, p->p_pid, fsid, fileid, 0,
1376 (uintptr_t)thread_tid(thread));
1377
1378 /*
1379 * Collect the pathname for tracing
1380 */
1381 kdbg_trace_string(p, &args[0], &args[1], &args[2], &args[3]);
1382 KERNEL_DEBUG_CONSTANT_IST1(TRACE_STRING_EXEC, args[0], args[1],
1383 args[2], args[3], (uintptr_t)thread_tid(thread));
1384 }
1385
1386 /*
1387 * If posix_spawned with the START_SUSPENDED flag, stop the
1388 * process before it runs.
1389 */
1390 if (imgp->ip_px_sa != NULL) {
1391 psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
1392 if (psa->psa_flags & POSIX_SPAWN_START_SUSPENDED) {
1393 proc_lock(p);
1394 p->p_stat = SSTOP;
1395 proc_unlock(p);
1396 (void) task_suspend_internal(task);
1397 }
1398 }
1399
1400 /*
1401 * mark as execed, wakeup the process that vforked (if any) and tell
1402 * it that it now has its own resources back
1403 */
1404 OSBitOrAtomic(P_EXEC, &p->p_flag);
1405 proc_resetregister(p);
1406 if (p->p_pptr && (p->p_lflag & P_LPPWAIT)) {
1407 proc_lock(p);
1408 p->p_lflag &= ~P_LPPWAIT;
1409 proc_unlock(p);
1410 wakeup((caddr_t)p->p_pptr);
1411 }
1412
1413 /*
1414 * Pay for our earlier safety; deliver the delayed signals from
1415 * the incomplete vfexec process now that it's complete.
1416 */
1417 if (vfexec && (p->p_lflag & P_LTRACED)) {
1418 psignal_vfork(p, new_task, thread, SIGTRAP);
1419 }
1420
1421 goto done;
1422
1423 badtoolate:
1424 /* Don't allow child process to execute any instructions */
1425 if (!spawn) {
1426 if (vfexec) {
1427 assert(exec_failure_reason != OS_REASON_NULL);
1428 psignal_vfork_with_reason(p, new_task, thread, SIGKILL, exec_failure_reason);
1429 exec_failure_reason = OS_REASON_NULL;
1430 } else {
1431 assert(exec_failure_reason != OS_REASON_NULL);
1432 psignal_with_reason(p, SIGKILL, exec_failure_reason);
1433 exec_failure_reason = OS_REASON_NULL;
1434
1435 if (exec) {
1436 /* Terminate the exec copy task */
1437 task_terminate_internal(task);
1438 }
1439 }
1440
1441 /* We can't stop this system call at this point, so just pretend we succeeded */
1442 error = 0;
1443 } else {
1444 os_reason_free(exec_failure_reason);
1445 exec_failure_reason = OS_REASON_NULL;
1446 }
1447
1448 done:
1449 if (load_result.threadstate) {
1450 kfree(load_result.threadstate, load_result.threadstate_sz);
1451 load_result.threadstate = NULL;
1452 }
1453
1454 bad:
1455 /* If we hit this, we likely would have leaked an exit reason */
1456 assert(exec_failure_reason == OS_REASON_NULL);
1457 return error;
1458 }
1459
1460
1461
1462
1463 /*
1464 * Our image activator table; this is the table of the image types we are
1465 * capable of loading. We list them in order of preference to ensure the
1466 * fastest image load speed.
1467 *
1468 * XXX hardcoded, for now; should use linker sets
1469 */
1470 struct execsw {
1471 	int (*const ex_imgact)(struct image_params *);
1472 const char *ex_name;
1473 } const execsw[] = {
1474 { exec_mach_imgact, "Mach-o Binary" },
1475 { exec_fat_imgact, "Fat Binary" },
1476 { exec_shell_imgact, "Interpreter Script" },
1477 { NULL, NULL}
1478 };
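/*
 * Editorial summary (not part of the original source) of the activator
 * return protocol used by exec_activate_image() below:
 *
 *     -1  image not claimed; try the next activator
 *     -2  encapsulated (fat) binary recognized; reread and rescan
 *     -3  interpreter script recognized; relookup the interpreter
 *     >0  claimed, but activation failed with this errno
 */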
1479
1480
1481 /*
1482 * exec_activate_image
1483 *
1484 * Description: Iterate through the available image activators, and activate
1485 * the image associated with the imgp structure. We start with
1486  *		the activator for Mach-O binaries, followed by those for Fat
1487  *		binaries and Interpreter scripts.
1488 *
1489 * Parameters: struct image_params * Image parameter block
1490 *
1491 * Returns: 0 Success
1492 * EBADEXEC The executable is corrupt/unknown
1493 * execargs_alloc:EINVAL Invalid argument
1494 * execargs_alloc:EACCES Permission denied
1495 * execargs_alloc:EINTR Interrupted function
1496 * execargs_alloc:ENOMEM Not enough space
1497 * exec_save_path:EFAULT Bad address
1498 * exec_save_path:ENAMETOOLONG Filename too long
1499 * exec_check_permissions:EACCES Permission denied
1500 * exec_check_permissions:ENOEXEC Executable file format error
1501 * exec_check_permissions:ETXTBSY Text file busy [misuse of error code]
1502 * exec_check_permissions:???
1503 * namei:???
1504 * vn_rdwr:??? [anything vn_rdwr can return]
1505 * <ex_imgact>:??? [anything an imgact can return]
1506 * EDEADLK Process is being terminated
1507 */
1508 static int
1509 exec_activate_image(struct image_params *imgp)
1510 {
1511 struct nameidata *ndp = NULL;
1512 const char *excpath;
1513 int error;
1514 int resid;
1515 	int once = 1; /* save SUGID-ness for interpreted files */
1516 int i;
1517 int itercount = 0;
1518 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
1519
1520 error = execargs_alloc(imgp);
1521 if (error) {
1522 goto bad_notrans;
1523 }
1524
1525 error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath);
1526 if (error) {
1527 goto bad_notrans;
1528 }
1529
1530 /* Use excpath, which contains the copyin-ed exec path */
1531 DTRACE_PROC1(exec, uintptr_t, excpath);
1532
1533 MALLOC(ndp, struct nameidata *, sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO);
1534 if (ndp == NULL) {
1535 error = ENOMEM;
1536 goto bad_notrans;
1537 }
1538
1539 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
1540 UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);
1541
1542 again:
1543 error = namei(ndp);
1544 if (error) {
1545 goto bad_notrans;
1546 }
1547 imgp->ip_ndp = ndp; /* successful namei(); call nameidone() later */
1548 imgp->ip_vp = ndp->ni_vp; /* if set, need to vnode_put() at some point */
1549
1550 /*
1551 * Before we start the transition from binary A to binary B, make
1552 * sure another thread hasn't started exiting the process. We grab
1553 * the proc lock to check p_lflag initially, and the transition
1554 * mechanism ensures that the value doesn't change after we release
1555 * the lock.
1556 */
1557 proc_lock(p);
1558 if (p->p_lflag & P_LEXIT) {
1559 error = EDEADLK;
1560 proc_unlock(p);
1561 goto bad_notrans;
1562 }
1563 error = proc_transstart(p, 1, 0);
1564 proc_unlock(p);
1565 if (error) {
1566 goto bad_notrans;
1567 }
1568
1569 error = exec_check_permissions(imgp);
1570 if (error) {
1571 goto bad;
1572 }
1573
1574 /* Copy; avoid invocation of an interpreter overwriting the original */
1575 if (once) {
1576 once = 0;
1577 *imgp->ip_origvattr = *imgp->ip_vattr;
1578 }
1579
1580 error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, PAGE_SIZE, 0,
1581 UIO_SYSSPACE, IO_NODELOCKED,
1582 vfs_context_ucred(imgp->ip_vfs_context),
1583 &resid, vfs_context_proc(imgp->ip_vfs_context));
1584 if (error) {
1585 goto bad;
1586 }
1587
1588 if (resid) {
1589 memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
1590 }
1591
1592 encapsulated_binary:
1593 /* Limit the number of iterations we will attempt on each binary */
1594 if (++itercount > EAI_ITERLIMIT) {
1595 error = EBADEXEC;
1596 goto bad;
1597 }
1598 error = -1;
1599 for (i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) {
1600 error = (*execsw[i].ex_imgact)(imgp);
1601
1602 switch (error) {
1603 /* case -1: not claimed: continue */
1604 case -2: /* Encapsulated binary, imgp->ip_XXX set for next iteration */
1605 goto encapsulated_binary;
1606
1607 case -3: /* Interpreter */
1608 #if CONFIG_MACF
1609 /*
1610 * Copy the script label for later use. Note that
1611 * the label can be different when the script is
1612 * actually read by the interpreter.
1613 */
1614 if (imgp->ip_scriptlabelp) {
1615 mac_vnode_label_free(imgp->ip_scriptlabelp);
1616 }
1617 imgp->ip_scriptlabelp = mac_vnode_label_alloc();
1618 if (imgp->ip_scriptlabelp == NULL) {
1619 error = ENOMEM;
1620 break;
1621 }
1622 mac_vnode_label_copy(imgp->ip_vp->v_label,
1623 imgp->ip_scriptlabelp);
1624
1625 /*
1626 * Take a ref of the script vnode for later use.
1627 */
1628 if (imgp->ip_scriptvp) {
1629 vnode_put(imgp->ip_scriptvp);
1630 }
1631 if (vnode_getwithref(imgp->ip_vp) == 0) {
1632 imgp->ip_scriptvp = imgp->ip_vp;
1633 }
1634 #endif
1635
1636 nameidone(ndp);
1637
1638 vnode_put(imgp->ip_vp);
1639 imgp->ip_vp = NULL; /* already put */
1640 imgp->ip_ndp = NULL; /* already nameidone */
1641
1642 /* Use excpath, which exec_shell_imgact reset to the interpreter */
1643 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF,
1644 UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);
1645
1646 proc_transend(p, 0);
1647 goto again;
1648
1649 default:
1650 break;
1651 }
1652 }
1653
1654 if (error == 0) {
1655 		if ((imgp->ip_flags & IMGPF_INTERPRET) && ndp->ni_vp) {
1656 AUDIT_ARG(vnpath, ndp->ni_vp, ARG_VNODE2);
1657 }
1658
1659 /*
1660 * Call out to allow 3rd party notification of exec.
1661 * Ignore result of kauth_authorize_fileop call.
1662 */
1663 if (kauth_authorize_fileop_has_listeners()) {
1664 kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context),
1665 KAUTH_FILEOP_EXEC,
1666 (uintptr_t)ndp->ni_vp, 0);
1667 }
1668 }
1669 bad:
1670 proc_transend(p, 0);
1671
1672 bad_notrans:
1673 if (imgp->ip_strings) {
1674 execargs_free(imgp);
1675 }
1676 if (imgp->ip_ndp) {
1677 nameidone(imgp->ip_ndp);
1678 }
1679 if (ndp) {
1680 FREE(ndp, M_TEMP);
1681 }
1682
1683 return error;
1684 }
1685
1686 /*
1687 * exec_validate_spawnattr_policy
1688 *
1689 * Description: Validates the entitlements required to set the apptype.
1690 *
1691 * Parameters: int psa_apptype posix spawn attribute apptype
1692 *
1693 * Returns: 0 Success
1694 * EPERM Failure
1695 */
1696 static errno_t
1697 exec_validate_spawnattr_policy(int psa_apptype)
1698 {
1699 if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) {
1700 int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK;
1701 if (proctype == POSIX_SPAWN_PROC_TYPE_DRIVER) {
1702 if (!IOTaskHasEntitlement(current_task(), POSIX_SPAWN_ENTITLEMENT_DRIVER)) {
1703 return EPERM;
1704 }
1705 }
1706 }
1707
1708 return 0;
1709 }
1710
1711 /*
1712 * exec_handle_spawnattr_policy
1713 *
1714 * Description: Decode and apply the posix_spawn apptype, qos clamp, and watchport ports to the task.
1715 *
1716 * Parameters: proc_t p process to apply attributes to
1717 * int psa_apptype posix spawn attribute apptype
1718 *
1719 * Returns: 0 Success
1720 */
1721 static errno_t
1722 exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp,
1723 uint64_t psa_darwin_role, struct exec_port_actions *port_actions)
1724 {
1725 int apptype = TASK_APPTYPE_NONE;
1726 int qos_clamp = THREAD_QOS_UNSPECIFIED;
1727 int role = TASK_UNSPECIFIED;
1728
1729 if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) {
1730 int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK;
1731
1732 switch (proctype) {
1733 case POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE:
1734 apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
1735 break;
1736 case POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD:
1737 apptype = TASK_APPTYPE_DAEMON_STANDARD;
1738 break;
1739 case POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE:
1740 apptype = TASK_APPTYPE_DAEMON_ADAPTIVE;
1741 break;
1742 case POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND:
1743 apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
1744 break;
1745 case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT:
1746 apptype = TASK_APPTYPE_APP_DEFAULT;
1747 break;
1748 #if !CONFIG_EMBEDDED
1749 case POSIX_SPAWN_PROC_TYPE_APP_TAL:
1750 apptype = TASK_APPTYPE_APP_TAL;
1751 break;
1752 #endif /* !CONFIG_EMBEDDED */
1753 case POSIX_SPAWN_PROC_TYPE_DRIVER:
1754 apptype = TASK_APPTYPE_DRIVER;
1755 break;
1756 default:
1757 apptype = TASK_APPTYPE_NONE;
1758 /* TODO: Should an invalid value here fail the spawn? */
1759 break;
1760 }
1761 }
1762
1763 if (psa_qos_clamp != POSIX_SPAWN_PROC_CLAMP_NONE) {
1764 switch (psa_qos_clamp) {
1765 case POSIX_SPAWN_PROC_CLAMP_UTILITY:
1766 qos_clamp = THREAD_QOS_UTILITY;
1767 break;
1768 case POSIX_SPAWN_PROC_CLAMP_BACKGROUND:
1769 qos_clamp = THREAD_QOS_BACKGROUND;
1770 break;
1771 case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE:
1772 qos_clamp = THREAD_QOS_MAINTENANCE;
1773 break;
1774 default:
1775 qos_clamp = THREAD_QOS_UNSPECIFIED;
1776 /* TODO: Should an invalid value here fail the spawn? */
1777 break;
1778 }
1779 }
1780
1781 if (psa_darwin_role != PRIO_DARWIN_ROLE_DEFAULT) {
1782 proc_darwin_role_to_task_role(psa_darwin_role, &role);
1783 }
1784
1785 if (apptype != TASK_APPTYPE_NONE ||
1786 qos_clamp != THREAD_QOS_UNSPECIFIED ||
1787 role != TASK_UNSPECIFIED ||
1788 port_actions->portwatch_count) {
1789 proc_set_task_spawnpolicy(p->task, thread, apptype, qos_clamp, role,
1790 port_actions->portwatch_array, port_actions->portwatch_count);
1791 }
1792
1793 if (port_actions->registered_count) {
1794 if (mach_ports_register(p->task, port_actions->registered_array,
1795 port_actions->registered_count)) {
1796 return EINVAL;
1797 }
1798 /* mach_ports_register() consumed the array */
1799 port_actions->registered_array = NULL;
1800 port_actions->registered_count = 0;
1801 }
1802
1803 return 0;
1804 }
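/*
 * The qos clamp decoded above originates from userspace; a sketch,
 * assuming the private posix_spawnattr_set_qos_clamp_np() SPI
 * (illustrative only, error handling omitted):
 *
 *	posix_spawnattr_t attr;
 *	posix_spawnattr_init(&attr);
 *	// Clamp all threads in the child at or below the utility QoS tier;
 *	// mapped to THREAD_QOS_UTILITY by the switch above.
 *	posix_spawnattr_set_qos_clamp_np(&attr, POSIX_SPAWN_PROC_CLAMP_UTILITY);
 */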
1805
1806 static void
1807 exec_port_actions_destroy(struct exec_port_actions *port_actions)
1808 {
1809 if (port_actions->portwatch_array) {
1810 for (uint32_t i = 0; i < port_actions->portwatch_count; i++) {
1811 ipc_port_t port = NULL;
1812 if ((port = port_actions->portwatch_array[i]) != NULL) {
1813 ipc_port_release_send(port);
1814 }
1815 }
1816 kfree(port_actions->portwatch_array,
1817 port_actions->portwatch_count * sizeof(ipc_port_t *));
1818 }
1819
1820 if (port_actions->registered_array) {
1821 for (uint32_t i = 0; i < port_actions->registered_count; i++) {
1822 ipc_port_t port = NULL;
1823 if ((port = port_actions->registered_array[i]) != NULL) {
1824 ipc_port_release_send(port);
1825 }
1826 }
1827 kfree(port_actions->registered_array,
1828 port_actions->registered_count * sizeof(ipc_port_t *));
1829 }
1830 }
1831
1832 /*
1833 * exec_handle_port_actions
1834 *
1835 * Description: Go through the _posix_spawn_port_actions_t contents,
1836 * calling task_set_special_port, task_set_exception_ports
1837 * and/or audit_session_spawnjoin for the current task.
1838 *
1839 * Parameters: struct image_params * Image parameter block
1840 *
1841 * Returns: 0 Success
1842 * EINVAL Failure
1843 * ENOMEM Insufficient memory
1844 */
1845 static errno_t
1846 exec_handle_port_actions(struct image_params *imgp,
1847 struct exec_port_actions *actions)
1848 {
1849 _posix_spawn_port_actions_t pacts = imgp->ip_px_spa;
1850 #if CONFIG_AUDIT
1851 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
1852 #endif
1853 _ps_port_action_t *act = NULL;
1854 task_t task = get_threadtask(imgp->ip_new_thread);
1855 ipc_port_t port = NULL;
1856 errno_t ret = 0;
1857 int i, portwatch_i = 0, registered_i = 0;
1858 kern_return_t kr;
1859 boolean_t task_has_watchport_boost = task_has_watchports(current_task());
1860 boolean_t in_exec = (imgp->ip_flags & IMGPF_EXEC);
1861
1862 for (i = 0; i < pacts->pspa_count; i++) {
1863 act = &pacts->pspa_actions[i];
1864
1865 switch (act->port_type) {
1866 case PSPA_SPECIAL:
1867 case PSPA_EXCEPTION:
1868 #if CONFIG_AUDIT
1869 case PSPA_AU_SESSION:
1870 #endif
1871 break;
1872 case PSPA_IMP_WATCHPORTS:
1873 if (++actions->portwatch_count > TASK_MAX_WATCHPORT_COUNT) {
1874 ret = EINVAL;
1875 goto done;
1876 }
1877 break;
1878 case PSPA_REGISTERED_PORTS:
1879 if (++actions->registered_count > TASK_PORT_REGISTER_MAX) {
1880 ret = EINVAL;
1881 goto done;
1882 }
1883 break;
1884 default:
1885 ret = EINVAL;
1886 goto done;
1887 }
1888 }
1889
1890 if (actions->portwatch_count) {
1891 if (in_exec && task_has_watchport_boost) {
1892 ret = EINVAL;
1893 goto done;
1894 }
1895 actions->portwatch_array =
1896 kalloc(sizeof(ipc_port_t *) * actions->portwatch_count);
1897 if (actions->portwatch_array == NULL) {
1898 ret = ENOMEM;
1899 goto done;
1900 }
1901 bzero(actions->portwatch_array,
1902 sizeof(ipc_port_t *) * actions->portwatch_count);
1903 }
1904
1905 if (actions->registered_count) {
1906 actions->registered_array =
1907 kalloc(sizeof(ipc_port_t *) * actions->registered_count);
1908 if (actions->registered_array == NULL) {
1909 ret = ENOMEM;
1910 goto done;
1911 }
1912 bzero(actions->registered_array,
1913 sizeof(ipc_port_t *) * actions->registered_count);
1914 }
1915
1916 for (i = 0; i < pacts->pspa_count; i++) {
1917 act = &pacts->pspa_actions[i];
1918
1919 if (MACH_PORT_VALID(act->new_port)) {
1920 kr = ipc_object_copyin(get_task_ipcspace(current_task()),
1921 act->new_port, MACH_MSG_TYPE_COPY_SEND,
1922 (ipc_object_t *) &port, 0, NULL, IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND);
1923
1924 if (kr != KERN_SUCCESS) {
1925 ret = EINVAL;
1926 goto done;
1927 }
1928 } else {
1929 /* it's NULL or DEAD */
1930 port = CAST_MACH_NAME_TO_PORT(act->new_port);
1931 }
1932
1933 switch (act->port_type) {
1934 case PSPA_SPECIAL:
1935 kr = task_set_special_port(task, act->which, port);
1936
1937 if (kr != KERN_SUCCESS) {
1938 ret = EINVAL;
1939 }
1940 break;
1941
1942 case PSPA_EXCEPTION:
1943 kr = task_set_exception_ports(task, act->mask, port,
1944 act->behavior, act->flavor);
1945 if (kr != KERN_SUCCESS) {
1946 ret = EINVAL;
1947 }
1948 break;
1949 #if CONFIG_AUDIT
1950 case PSPA_AU_SESSION:
1951 ret = audit_session_spawnjoin(p, task, port);
1952 if (ret) {
1953 /* audit_session_spawnjoin() has already dropped the reference in case of error. */
1954 goto done;
1955 }
1956
1957 break;
1958 #endif
1959 case PSPA_IMP_WATCHPORTS:
1960 if (actions->portwatch_array) {
1961 /* hold on to this till end of spawn */
1962 actions->portwatch_array[portwatch_i++] = port;
1963 } else {
1964 ipc_port_release_send(port);
1965 }
1966 break;
1967 case PSPA_REGISTERED_PORTS:
1968 /* hold on to this till end of spawn */
1969 actions->registered_array[registered_i++] = port;
1970 break;
1971 default:
1972 ret = EINVAL;
1973 break;
1974 }
1975
1976 if (ret) {
1977 /* action failed, so release port resources */
1978 ipc_port_release_send(port);
1979 break;
1980 }
1981 }
1982
1983 done:
1984 if (0 != ret) {
1985 DTRACE_PROC1(spawn__port__failure, mach_port_name_t, act->new_port);
1986 }
1987 return ret;
1988 }
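/*
 * The port actions walked above are queued by the posix_spawn port SPIs.
 * A minimal sketch of a PSPA_EXCEPTION producer, assuming the <spawn.h>
 * Apple extensions (illustrative only, error handling omitted):
 *
 *	#include <spawn.h>
 *	#include <mach/mach.h>
 *
 *	posix_spawnattr_t attr;
 *	mach_port_t port;
 *
 *	posix_spawnattr_init(&attr);
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
 *	mach_port_insert_right(mach_task_self(), port, port,
 *	    MACH_MSG_TYPE_MAKE_SEND);
 *	// Applied to the child by task_set_exception_ports() during spawn.
 *	posix_spawnattr_setexceptionports_np(&attr, EXC_MASK_CRASH, port,
 *	    EXCEPTION_DEFAULT, THREAD_STATE_NONE);
 */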
1989
1990 /*
1991 * exec_handle_file_actions
1992 *
1993 * Description: Go through the _posix_spawn_file_actions_t contents applying the
1994 * open, close, and dup2 operations to the open file table for
1995 * the current process.
1996 *
1997 * Parameters: struct image_params * Image parameter block
1998 *
1999 * Returns: 0 Success
2000 * ???
2001 *
2002 * Note: Actions are applied in the order specified, with the credential
2003 * of the parent process. This is done to permit the parent
2004 * process to utilize POSIX_SPAWN_RESETIDS to drop privilege in
2005 * the child after performing operations that the child itself
2006 * would not normally be permitted to perform.
2007 */
2008 static int
2009 exec_handle_file_actions(struct image_params *imgp, short psa_flags)
2010 {
2011 int error = 0;
2012 int action;
2013 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
2014 _posix_spawn_file_actions_t px_sfap = imgp->ip_px_sfa;
2015 int ival[2]; /* dummy retval for system calls */
2016
2017 for (action = 0; action < px_sfap->psfa_act_count; action++) {
2018 _psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
2019
2020 switch (psfa->psfaa_type) {
2021 case PSFA_OPEN: {
2022 /*
2023 * Open is different, in that it requires the use of
2024 * a path argument, which is normally copied in from
2025 * user space; because of this, we have to support an
2026 * open from kernel space that passes an address space
2027 * context of UIO_SYSSPACE, and casts the address
2028 * argument to a user_addr_t.
2029 */
2030 char *bufp = NULL;
2031 struct vnode_attr *vap;
2032 struct nameidata *ndp;
2033 int mode = psfa->psfaa_openargs.psfao_mode;
2034 struct dup2_args dup2a;
2035 struct close_nocancel_args ca;
2036 int origfd;
2037
2038 MALLOC(bufp, char *, sizeof(*vap) + sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO);
2039 if (bufp == NULL) {
2040 error = ENOMEM;
2041 break;
2042 }
2043
2044 vap = (struct vnode_attr *) bufp;
2045 ndp = (struct nameidata *) (bufp + sizeof(*vap));
2046
2047 VATTR_INIT(vap);
2048 /* Mask off all but regular access permissions */
2049 mode = ((mode & ~p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT;
2050 VATTR_SET(vap, va_mode, mode & ACCESSPERMS);
2051
2052 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
2053 CAST_USER_ADDR_T(psfa->psfaa_openargs.psfao_path),
2054 imgp->ip_vfs_context);
2055
2056 error = open1(imgp->ip_vfs_context,
2057 ndp,
2058 psfa->psfaa_openargs.psfao_oflag,
2059 vap,
2060 fileproc_alloc_init, NULL,
2061 ival);
2062
2063 FREE(bufp, M_TEMP);
2064
2065 /*
2066 * If there's an error, or we get the right fd by
2067 * accident, then drop out here. This is easier than
2068 * reworking all the open code to preallocate fd
2069 * slots, and internally taking one as an argument.
2070 */
2071 if (error || ival[0] == psfa->psfaa_filedes) {
2072 break;
2073 }
2074
2075 origfd = ival[0];
2076 /*
2077 * If we didn't fall out from an error, we ended up
2078 * with the wrong fd; so now we've got to try to dup2
2079 * it to the right one.
2080 */
2081 dup2a.from = origfd;
2082 dup2a.to = psfa->psfaa_filedes;
2083
2084 /*
2085 * The dup2() system call implementation sets
2086 * ival to newfd in the success case, but we
2087 * can ignore that, since if we didn't get the
2088 * fd we wanted, the error will stop us.
2089 */
2090 error = dup2(p, &dup2a, ival);
2091 if (error) {
2092 break;
2093 }
2094
2095 /*
2096 * Finally, close the original fd.
2097 */
2098 ca.fd = origfd;
2099
2100 error = close_nocancel(p, &ca, ival);
2101 }
2102 break;
2103
2104 case PSFA_DUP2: {
2105 struct dup2_args dup2a;
2106
2107 dup2a.from = psfa->psfaa_filedes;
2108 dup2a.to = psfa->psfaa_dup2args.psfad_newfiledes;
2109
2110 /*
2111 * The dup2() system call implementation sets
2112 * ival to newfd in the success case, but we
2113 * can ignore that, since if we didn't get the
2114 * fd we wanted, the error will stop us.
2115 */
2116 error = dup2(p, &dup2a, ival);
2117 }
2118 break;
2119
2120 case PSFA_FILEPORT_DUP2: {
2121 ipc_port_t port;
2122 kern_return_t kr;
2123 struct dup2_args dup2a;
2124 struct close_nocancel_args ca;
2125
2126 if (!MACH_PORT_VALID(psfa->psfaa_fileport)) {
2127 error = EINVAL;
2128 break;
2129 }
2130
2131 kr = ipc_object_copyin(get_task_ipcspace(current_task()),
2132 psfa->psfaa_fileport, MACH_MSG_TYPE_COPY_SEND,
2133 (ipc_object_t *) &port, 0, NULL, IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND);
2134
2135 if (kr != KERN_SUCCESS) {
2136 error = EINVAL;
2137 break;
2138 }
2139
2140 error = fileport_makefd_internal(p, port, 0, ival);
2141
2142 if (IPC_PORT_NULL != port) {
2143 ipc_port_release_send(port);
2144 }
2145
2146 if (error || ival[0] == psfa->psfaa_dup2args.psfad_newfiledes) {
2147 break;
2148 }
2149
2150 dup2a.from = ca.fd = ival[0];
2151 dup2a.to = psfa->psfaa_dup2args.psfad_newfiledes;
2152 error = dup2(p, &dup2a, ival);
2153 if (error) {
2154 break;
2155 }
2156
2157 error = close_nocancel(p, &ca, ival);
2158 }
2159 break;
2160
2161 case PSFA_CLOSE: {
2162 struct close_nocancel_args ca;
2163
2164 ca.fd = psfa->psfaa_filedes;
2165
2166 error = close_nocancel(p, &ca, ival);
2167 }
2168 break;
2169
2170 case PSFA_INHERIT: {
2171 struct fcntl_nocancel_args fcntla;
2172
2173 /*
2174 * Check to see if the descriptor exists, and
2175 * ensure it's -not- marked as close-on-exec.
2176 *
2177 * Attempting to "inherit" a guarded fd will
2178 * result in an error.
2179 */
2180 fcntla.fd = psfa->psfaa_filedes;
2181 fcntla.cmd = F_GETFD;
2182 if ((error = fcntl_nocancel(p, &fcntla, ival)) != 0) {
2183 break;
2184 }
2185
2186 if ((ival[0] & FD_CLOEXEC) == FD_CLOEXEC) {
2187 fcntla.fd = psfa->psfaa_filedes;
2188 fcntla.cmd = F_SETFD;
2189 fcntla.arg = ival[0] & ~FD_CLOEXEC;
2190 error = fcntl_nocancel(p, &fcntla, ival);
2191 }
2192 }
2193 break;
2194
2195 case PSFA_CHDIR: {
2196 /*
2197 * Chdir is different, in that it requires the use of
2198 * a path argument, which is normally copied in from
2199 * user space; because of this, we have to support a
2200 * chdir from kernel space that passes an address space
2201 * context of UIO_SYSSPACE, and casts the address
2202 * argument to a user_addr_t.
2203 */
2204 struct nameidata nd;
2205
2206 NDINIT(&nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
2207 CAST_USER_ADDR_T(psfa->psfaa_chdirargs.psfac_path),
2208 imgp->ip_vfs_context);
2209
2210 error = chdir_internal(p, imgp->ip_vfs_context, &nd, 0);
2211 }
2212 break;
2213
2214 case PSFA_FCHDIR: {
2215 struct fchdir_args fchdira;
2216
2217 fchdira.fd = psfa->psfaa_filedes;
2218
2219 error = fchdir(p, &fchdira, ival);
2220 }
2221 break;
2222
2223 default:
2224 error = EINVAL;
2225 break;
2226 }
2227
2228 /* All file action failures are considered fatal, per POSIX */
2229
2230 if (error) {
2231 if (PSFA_OPEN == psfa->psfaa_type) {
2232 DTRACE_PROC1(spawn__open__failure, uintptr_t,
2233 psfa->psfaa_openargs.psfao_path);
2234 } else {
2235 DTRACE_PROC1(spawn__fd__failure, int, psfa->psfaa_filedes);
2236 }
2237 break;
2238 }
2239 }
2240
2241 if (error != 0 || (psa_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) == 0) {
2242 return error;
2243 }
2244
2245 /*
2246 * If POSIX_SPAWN_CLOEXEC_DEFAULT is set, behave (during
2247 * this spawn only) as if "close on exec" is the default
2248 * disposition of all pre-existing file descriptors. In this case,
2249 * the list of file descriptors mentioned in the file actions
2250 * are the only ones that can be inherited, so mark them now.
2251 *
2252 * The actual closing part comes later, in fdexec().
2253 */
2254 proc_fdlock(p);
2255 for (action = 0; action < px_sfap->psfa_act_count; action++) {
2256 _psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
2257 int fd = psfa->psfaa_filedes;
2258
2259 switch (psfa->psfaa_type) {
2260 case PSFA_DUP2:
2261 case PSFA_FILEPORT_DUP2:
2262 fd = psfa->psfaa_dup2args.psfad_newfiledes;
2263 /*FALLTHROUGH*/
2264 case PSFA_OPEN:
2265 case PSFA_INHERIT:
2266 *fdflags(p, fd) |= UF_INHERIT;
2267 break;
2268
2269 case PSFA_CLOSE:
2270 case PSFA_CHDIR:
2271 case PSFA_FCHDIR:
2272 /*
2273 * Although PSFA_FCHDIR does have a file descriptor, it is not
2274 * *creating* one, thus we do not automatically mark it for
2275 * inheritance under POSIX_SPAWN_CLOEXEC_DEFAULT. A client that
2276 * wishes it to be inherited should use the PSFA_INHERIT action
2277 * explicitly.
2278 */
2279 break;
2280 }
2281 }
2282 proc_fdunlock(p);
2283
2284 return 0;
2285 }
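/*
 * The PSFA_* cases above map one-to-one onto the portable
 * posix_spawn_file_actions API. A minimal userspace sketch (paths and
 * names illustrative, error handling omitted):
 *
 *	#include <spawn.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	extern char **environ;
 *
 *	posix_spawn_file_actions_t fa;
 *	posix_spawnattr_t attr;
 *	pid_t child;
 *	char *argv[] = { "worker", NULL };
 *
 *	posix_spawn_file_actions_init(&fa);
 *	// PSFA_OPEN: opened with the parent's credential, then dup2'd
 *	// into place if open1() hands back a different fd.
 *	posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO,
 *	    "/tmp/worker.log", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	posix_spawn_file_actions_adddup2(&fa, STDOUT_FILENO, STDERR_FILENO);
 *	posix_spawn_file_actions_addclose(&fa, 3);	// PSFA_CLOSE
 *	posix_spawnattr_init(&attr);
 *	// Only fds named in the actions survive; implemented by the
 *	// UF_INHERIT marking above plus the later fdexec().
 *	posix_spawnattr_setflags(&attr, POSIX_SPAWN_CLOEXEC_DEFAULT);
 *	posix_spawn(&child, "/usr/local/bin/worker", &fa, &attr, argv, environ);
 */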
2286
2287 #if CONFIG_MACF
2288 /*
2289 * exec_spawnattr_getmacpolicyinfo
2290 */
2291 void *
2292 exec_spawnattr_getmacpolicyinfo(const void *macextensions, const char *policyname, size_t *lenp)
2293 {
2294 const struct _posix_spawn_mac_policy_extensions *psmx = macextensions;
2295 int i;
2296
2297 if (psmx == NULL) {
2298 return NULL;
2299 }
2300
2301 for (i = 0; i < psmx->psmx_count; i++) {
2302 const _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
2303 if (strncmp(extension->policyname, policyname, sizeof(extension->policyname)) == 0) {
2304 if (lenp != NULL) {
2305 *lenp = extension->datalen;
2306 }
2307 return extension->datap;
2308 }
2309 }
2310
2311 if (lenp != NULL) {
2312 *lenp = 0;
2313 }
2314 return NULL;
2315 }
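/*
 * A MAC policy would typically call this from one of its exec hooks to
 * recover the blob its userspace counterpart attached; a sketch (hook
 * plumbing elided, "com.example.policy" is illustrative):
 *
 *	size_t len = 0;
 *	void *blob = exec_spawnattr_getmacpolicyinfo(macextensions,
 *	    "com.example.policy", &len);
 *	if (blob != NULL) {
 *		// interpret len bytes of policy-specific data
 *	}
 */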
2316
2317 static int
2318 spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args, _posix_spawn_mac_policy_extensions_t *psmxp)
2319 {
2320 _posix_spawn_mac_policy_extensions_t psmx = NULL;
2321 int error = 0;
2322 int copycnt = 0;
2323 int i = 0;
2324
2325 *psmxp = NULL;
2326
2327 if (px_args->mac_extensions_size < PS_MAC_EXTENSIONS_SIZE(1) ||
2328 px_args->mac_extensions_size > PAGE_SIZE) {
2329 error = EINVAL;
2330 goto bad;
2331 }
2332
2333 MALLOC(psmx, _posix_spawn_mac_policy_extensions_t, px_args->mac_extensions_size, M_TEMP, M_WAITOK);
2334 if ((error = copyin(px_args->mac_extensions, psmx, px_args->mac_extensions_size)) != 0) {
2335 goto bad;
2336 }
2337
2338 size_t extsize = PS_MAC_EXTENSIONS_SIZE(psmx->psmx_count);
2339 if (extsize == 0 || extsize > px_args->mac_extensions_size) {
2340 error = EINVAL;
2341 goto bad;
2342 }
2343
2344 for (i = 0; i < psmx->psmx_count; i++) {
2345 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
2346 if (extension->datalen == 0 || extension->datalen > PAGE_SIZE) {
2347 error = EINVAL;
2348 goto bad;
2349 }
2350 }
2351
2352 for (copycnt = 0; copycnt < psmx->psmx_count; copycnt++) {
2353 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[copycnt];
2354 void *data = NULL;
2355
2356 MALLOC(data, void *, extension->datalen, M_TEMP, M_WAITOK);
2357 if ((error = copyin(extension->data, data, extension->datalen)) != 0) {
2358 FREE(data, M_TEMP);
2359 goto bad;
2360 }
2361 extension->datap = data;
2362 }
2363
2364 *psmxp = psmx;
2365 return 0;
2366
2367 bad:
2368 if (psmx != NULL) {
2369 for (i = 0; i < copycnt; i++) {
2370 FREE(psmx->psmx_extensions[i].datap, M_TEMP);
2371 }
2372 FREE(psmx, M_TEMP);
2373 }
2374 return error;
2375 }
2376
2377 static void
2378 spawn_free_macpolicyinfo(_posix_spawn_mac_policy_extensions_t psmx)
2379 {
2380 int i;
2381
2382 if (psmx == NULL) {
2383 return;
2384 }
2385 for (i = 0; i < psmx->psmx_count; i++) {
2386 FREE(psmx->psmx_extensions[i].datap, M_TEMP);
2387 }
2388 FREE(psmx, M_TEMP);
2389 }
2390 #endif /* CONFIG_MACF */
2391
2392 #if CONFIG_COALITIONS
2393 static inline void
2394 spawn_coalitions_release_all(coalition_t coal[COALITION_NUM_TYPES])
2395 {
2396 for (int c = 0; c < COALITION_NUM_TYPES; c++) {
2397 if (coal[c]) {
2398 coalition_remove_active(coal[c]);
2399 coalition_release(coal[c]);
2400 }
2401 }
2402 }
2403 #endif
2404
2405 #if CONFIG_PERSONAS
2406 static int
2407 spawn_validate_persona(struct _posix_spawn_persona_info *px_persona)
2408 {
2409 int error = 0;
2410 struct persona *persona = NULL;
2411 int verify = px_persona->pspi_flags & POSIX_SPAWN_PERSONA_FLAGS_VERIFY;
2412
2413 if (!IOTaskHasEntitlement(current_task(), PERSONA_MGMT_ENTITLEMENT)) {
2414 return EPERM;
2415 }
2416
2417 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
2418 if (px_persona->pspi_ngroups > NGROUPS_MAX) {
2419 return EINVAL;
2420 }
2421 }
2422
2423 persona = persona_lookup(px_persona->pspi_id);
2424 if (!persona) {
2425 error = ESRCH;
2426 goto out;
2427 }
2428
2429 if (verify) {
2430 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) {
2431 if (px_persona->pspi_uid != persona_get_uid(persona)) {
2432 error = EINVAL;
2433 goto out;
2434 }
2435 }
2436 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) {
2437 if (px_persona->pspi_gid != persona_get_gid(persona)) {
2438 error = EINVAL;
2439 goto out;
2440 }
2441 }
2442 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
2443 unsigned ngroups = 0;
2444 gid_t groups[NGROUPS_MAX];
2445
2446 if (persona_get_groups(persona, &ngroups, groups,
2447 px_persona->pspi_ngroups) != 0) {
2448 error = EINVAL;
2449 goto out;
2450 }
2451 if (ngroups != px_persona->pspi_ngroups) {
2452 error = EINVAL;
2453 goto out;
2454 }
2455 while (ngroups--) {
2456 if (px_persona->pspi_groups[ngroups] != groups[ngroups]) {
2457 error = EINVAL;
2458 goto out;
2459 }
2460 }
2461 if (px_persona->pspi_gmuid != persona_get_gmuid(persona)) {
2462 error = EINVAL;
2463 goto out;
2464 }
2465 }
2466 }
2467
2468 out:
2469 if (persona) {
2470 persona_put(persona);
2471 }
2472
2473 return error;
2474 }
2475
2476 static int
2477 spawn_persona_adopt(proc_t p, struct _posix_spawn_persona_info *px_persona)
2478 {
2479 int ret;
2480 kauth_cred_t cred;
2481 struct persona *persona = NULL;
2482 int override = !!(px_persona->pspi_flags & POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE);
2483
2484 if (!override) {
2485 return persona_proc_adopt_id(p, px_persona->pspi_id, NULL);
2486 }
2487
2488 /*
2489 * we want to spawn into the given persona, but we want to override
2490 * the kauth with a different UID/GID combo
2491 */
2492 persona = persona_lookup(px_persona->pspi_id);
2493 if (!persona) {
2494 return ESRCH;
2495 }
2496
2497 cred = persona_get_cred(persona);
2498 if (!cred) {
2499 ret = EINVAL;
2500 goto out;
2501 }
2502
2503 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) {
2504 cred = kauth_cred_setresuid(cred,
2505 px_persona->pspi_uid,
2506 px_persona->pspi_uid,
2507 px_persona->pspi_uid,
2508 KAUTH_UID_NONE);
2509 }
2510
2511 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) {
2512 cred = kauth_cred_setresgid(cred,
2513 px_persona->pspi_gid,
2514 px_persona->pspi_gid,
2515 px_persona->pspi_gid);
2516 }
2517
2518 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
2519 cred = kauth_cred_setgroups(cred,
2520 px_persona->pspi_groups,
2521 px_persona->pspi_ngroups,
2522 px_persona->pspi_gmuid);
2523 }
2524
2525 ret = persona_proc_adopt(p, persona, cred);
2526
2527 out:
2528 persona_put(persona);
2529 return ret;
2530 }
2531 #endif
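/*
 * Userspace reaches the persona paths above through the private persona
 * SPIs; a sketch, assuming the <spawn_private.h> interface (names
 * illustrative, error handling omitted):
 *
 *	posix_spawnattr_set_persona_np(&attr, persona_id,
 *	    POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE);
 *	posix_spawnattr_set_persona_uid_np(&attr, 501);
 *	posix_spawnattr_set_persona_gid_np(&attr, 20);
 */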
2532
2533 #if __arm64__
2534 extern int legacy_footprint_entitlement_mode;
2535 static inline void
2536 proc_legacy_footprint_entitled(proc_t p, task_t task, const char *caller)
2537 {
2538 #pragma unused(p, caller)
2539 boolean_t legacy_footprint_entitled;
2540
2541 switch (legacy_footprint_entitlement_mode) {
2542 case LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE:
2543 /* the entitlement is ignored */
2544 break;
2545 case LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT:
2546 /* the entitlement grants iOS11 legacy accounting */
2547 legacy_footprint_entitled = IOTaskHasEntitlement(task,
2548 "com.apple.private.memory.legacy_footprint");
2549 if (legacy_footprint_entitled) {
2550 task_set_legacy_footprint(task);
2551 }
2552 break;
2553 case LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE:
2554 /* the entitlement grants a footprint limit increase */
2555 legacy_footprint_entitled = IOTaskHasEntitlement(task,
2556 "com.apple.private.memory.legacy_footprint");
2557 if (legacy_footprint_entitled) {
2558 task_set_extra_footprint_limit(task);
2559 }
2560 break;
2561 default:
2562 break;
2563 }
2564 }
2565 #endif /* __arm64__ */
2566
2567 /*
2568 * Apply a modification to the proc's kauth cred until it converges.
2569 *
2570 * `update` consumes its argument to return a new kauth cred.
2571 */
2572 static void
2573 apply_kauth_cred_update(proc_t p,
2574 kauth_cred_t (^update)(kauth_cred_t orig_cred))
2575 {
2576 kauth_cred_t my_cred, my_new_cred;
2577
2578 my_cred = kauth_cred_proc_ref(p);
2579 for (;;) {
2580 my_new_cred = update(my_cred);
2581 if (my_cred == my_new_cred) {
2582 kauth_cred_unref(&my_new_cred);
2583 break;
2584 }
2585
2586 /* try update cred on proc */
2587 proc_ucred_lock(p);
2588
2589 if (p->p_ucred == my_cred) {
2590 /* base pointer didn't change, donate our ref */
2591 p->p_ucred = my_new_cred;
2592 PROC_UPDATE_CREDS_ONPROC(p);
2593 proc_ucred_unlock(p);
2594
2595 /* drop p->p_ucred reference */
2596 kauth_cred_unref(&my_cred);
2597 break;
2598 }
2599
2600 /* base pointer changed, retry */
2601 my_cred = p->p_ucred;
2602 kauth_cred_ref(my_cred);
2603 proc_ucred_unlock(p);
2604
2605 kauth_cred_unref(&my_new_cred);
2606 }
2607 }
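/*
 * Usage sketch: the block either returns its argument unchanged (nothing
 * to do, the loop exits) or consumes it and returns a new credential; the
 * loop retries if p->p_ucred was swapped underneath us. This is exactly
 * how POSIX_SPAWN_RESETIDS is applied later in posix_spawn():
 *
 *	apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
 *		return kauth_cred_setuidgid(my_cred,
 *		    kauth_cred_getruid(my_cred),
 *		    kauth_cred_getrgid(my_cred));
 *	});
 */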
2608
2609 static int
2610 spawn_posix_cred_adopt(proc_t p,
2611 struct _posix_spawn_posix_cred_info *px_pcred_info)
2612 {
2613 int error = 0;
2614
2615 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GID) {
2616 struct setgid_args args = {
2617 .gid = px_pcred_info->pspci_gid,
2618 };
2619 error = setgid(p, &args, NULL);
2620 if (error) {
2621 return error;
2622 }
2623 }
2624
2625 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GROUPS) {
2626 error = setgroups_internal(p,
2627 px_pcred_info->pspci_ngroups,
2628 px_pcred_info->pspci_groups,
2629 px_pcred_info->pspci_gmuid);
2630 if (error) {
2631 return error;
2632 }
2633 }
2634
2635 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_UID) {
2636 struct setuid_args args = {
2637 .uid = px_pcred_info->pspci_uid,
2638 };
2639 error = setuid(p, &args, NULL);
2640 if (error) {
2641 return error;
2642 }
2643 }
2644 return 0;
2645 }
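/*
 * Before the implementation below, the view from userspace: an ordinary
 * spawn creates a child, while the Apple POSIX_SPAWN_SETEXEC flag turns
 * the call into "execve() with options", replacing the calling image.
 * Sketch (argv/environ assumed declared; illustrative only):
 *
 *	posix_spawnattr_t attr;
 *	posix_spawnattr_init(&attr);
 *	posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
 *	// Does not return on success; the caller becomes /bin/ls.
 *	posix_spawn(NULL, "/bin/ls", NULL, &attr, argv, environ);
 */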
2646
2647 /*
2648 * posix_spawn
2649 *
2650 * Parameters: uap->pid Pointer to pid return area
2651 * uap->fname File name to exec
2652 * uap->argp Argument list
2653 * uap->envp Environment list
2654 *
2655 * Returns: 0 Success
2656 * EINVAL Invalid argument
2657 * ENOTSUP Not supported
2658 * ENOEXEC Executable file format error
2659 * exec_activate_image:EINVAL Invalid argument
2660 * exec_activate_image:EACCES Permission denied
2661 * exec_activate_image:EINTR Interrupted function
2662 * exec_activate_image:ENOMEM Not enough space
2663 * exec_activate_image:EFAULT Bad address
2664 * exec_activate_image:ENAMETOOLONG Filename too long
2665 * exec_activate_image:ENOEXEC Executable file format error
2666 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
2667 * exec_activate_image:EAUTH Image decryption failed
2668 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
2669 * exec_activate_image:???
2670 * mac_execve_enter:???
2671 *
2672 * TODO: Expect to need __mac_posix_spawn() at some point...
2673 * Handle posix_spawnattr_t
2674 * Handle posix_spawn_file_actions_t
2675 */
2676 int
2677 posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval)
2678 {
2679 proc_t p = ap; /* quiet bogus GCC vfork() warning */
2680 user_addr_t pid = uap->pid;
2681 int ival[2]; /* dummy retval for setpgid() */
2682 char *bufp = NULL;
2683 struct image_params *imgp;
2684 struct vnode_attr *vap;
2685 struct vnode_attr *origvap;
2686 struct uthread *uthread = 0; /* compiler complains if not set to 0 */
2687 int error, sig;
2688 int is_64 = IS_64BIT_PROCESS(p);
2689 struct vfs_context context;
2690 struct user__posix_spawn_args_desc px_args;
2691 struct _posix_spawnattr px_sa;
2692 _posix_spawn_file_actions_t px_sfap = NULL;
2693 _posix_spawn_port_actions_t px_spap = NULL;
2694 struct __kern_sigaction vec;
2695 boolean_t spawn_no_exec = FALSE;
2696 boolean_t proc_transit_set = TRUE;
2697 boolean_t exec_done = FALSE;
2698 struct exec_port_actions port_actions = { };
2699 vm_size_t px_sa_offset = offsetof(struct _posix_spawnattr, psa_ports);
2700 task_t old_task = current_task();
2701 task_t new_task = NULL;
2702 boolean_t should_release_proc_ref = FALSE;
2703 void *inherit = NULL;
2704 #if CONFIG_PERSONAS
2705 struct _posix_spawn_persona_info *px_persona = NULL;
2706 #endif
2707 struct _posix_spawn_posix_cred_info *px_pcred_info = NULL;
2708
2709 /*
2710 * Allocate a big chunk for locals instead of using stack since these
2711 * structures are pretty big.
2712 */
2713 MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO);
2714 imgp = (struct image_params *) bufp;
2715 if (bufp == NULL) {
2716 error = ENOMEM;
2717 goto bad;
2718 }
2719 vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
2720 origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));
2721
2722 /* Initialize the common data in the image_params structure */
2723 imgp->ip_user_fname = uap->path;
2724 imgp->ip_user_argv = uap->argv;
2725 imgp->ip_user_envv = uap->envp;
2726 imgp->ip_vattr = vap;
2727 imgp->ip_origvattr = origvap;
2728 imgp->ip_vfs_context = &context;
2729 imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE);
2730 imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
2731 imgp->ip_mac_return = 0;
2732 imgp->ip_px_persona = NULL;
2733 imgp->ip_px_pcred_info = NULL;
2734 imgp->ip_cs_error = OS_REASON_NULL;
2735 imgp->ip_simulator_binary = IMGPF_SB_DEFAULT;
2736
2737 if (uap->adesc != USER_ADDR_NULL) {
2738 if (is_64) {
2739 error = copyin(uap->adesc, &px_args, sizeof(px_args));
2740 } else {
2741 struct user32__posix_spawn_args_desc px_args32;
2742
2743 error = copyin(uap->adesc, &px_args32, sizeof(px_args32));
2744
2745 /*
2746 * Convert arguments descriptor from external 32 bit
2747 * representation to internal 64 bit representation
2748 */
2749 px_args.attr_size = px_args32.attr_size;
2750 px_args.attrp = CAST_USER_ADDR_T(px_args32.attrp);
2751 px_args.file_actions_size = px_args32.file_actions_size;
2752 px_args.file_actions = CAST_USER_ADDR_T(px_args32.file_actions);
2753 px_args.port_actions_size = px_args32.port_actions_size;
2754 px_args.port_actions = CAST_USER_ADDR_T(px_args32.port_actions);
2755 px_args.mac_extensions_size = px_args32.mac_extensions_size;
2756 px_args.mac_extensions = CAST_USER_ADDR_T(px_args32.mac_extensions);
2757 px_args.coal_info_size = px_args32.coal_info_size;
2758 px_args.coal_info = CAST_USER_ADDR_T(px_args32.coal_info);
2759 px_args.persona_info_size = px_args32.persona_info_size;
2760 px_args.persona_info = CAST_USER_ADDR_T(px_args32.persona_info);
2761 px_args.posix_cred_info_size = px_args32.posix_cred_info_size;
2762 px_args.posix_cred_info = CAST_USER_ADDR_T(px_args32.posix_cred_info);
2763 }
2764 if (error) {
2765 goto bad;
2766 }
2767
2768 if (px_args.attr_size != 0) {
2769 /*
2770 * We are not copying the port_actions pointer,
2771 * because we already have it from px_args.
2772 * This is a bit fragile: <rdar://problem/16427422>
2773 */
2774
2775 if ((error = copyin(px_args.attrp, &px_sa, px_sa_offset)) != 0) {
2776 goto bad;
2777 }
2778
2779 bzero((void *)((unsigned long) &px_sa + px_sa_offset), sizeof(px_sa) - px_sa_offset);
2780
2781 imgp->ip_px_sa = &px_sa;
2782 }
2783 if (px_args.file_actions_size != 0) {
2784 /* Limit file_actions to allowed number of open files */
2785 int maxfa = (p->p_limit ? p->p_rlimit[RLIMIT_NOFILE].rlim_cur : NOFILE);
2786 size_t maxfa_size = PSF_ACTIONS_SIZE(maxfa);
2787 if (px_args.file_actions_size < PSF_ACTIONS_SIZE(1) ||
2788 maxfa_size == 0 || px_args.file_actions_size > maxfa_size) {
2789 error = EINVAL;
2790 goto bad;
2791 }
2792 MALLOC(px_sfap, _posix_spawn_file_actions_t, px_args.file_actions_size, M_TEMP, M_WAITOK);
2793 if (px_sfap == NULL) {
2794 error = ENOMEM;
2795 goto bad;
2796 }
2797 imgp->ip_px_sfa = px_sfap;
2798
2799 if ((error = copyin(px_args.file_actions, px_sfap,
2800 px_args.file_actions_size)) != 0) {
2801 goto bad;
2802 }
2803
2804 /* Verify that the action count matches the struct size */
2805 size_t psfsize = PSF_ACTIONS_SIZE(px_sfap->psfa_act_count);
2806 if (psfsize == 0 || psfsize != px_args.file_actions_size) {
2807 error = EINVAL;
2808 goto bad;
2809 }
2810 }
2811 if (px_args.port_actions_size != 0) {
2812 /* Limit port_actions to one page of data */
2813 if (px_args.port_actions_size < PS_PORT_ACTIONS_SIZE(1) ||
2814 px_args.port_actions_size > PAGE_SIZE) {
2815 error = EINVAL;
2816 goto bad;
2817 }
2818
2819 MALLOC(px_spap, _posix_spawn_port_actions_t,
2820 px_args.port_actions_size, M_TEMP, M_WAITOK);
2821 if (px_spap == NULL) {
2822 error = ENOMEM;
2823 goto bad;
2824 }
2825 imgp->ip_px_spa = px_spap;
2826
2827 if ((error = copyin(px_args.port_actions, px_spap,
2828 px_args.port_actions_size)) != 0) {
2829 goto bad;
2830 }
2831
2832 /* Verify that the action count matches the struct size */
2833 size_t pasize = PS_PORT_ACTIONS_SIZE(px_spap->pspa_count);
2834 if (pasize == 0 || pasize != px_args.port_actions_size) {
2835 error = EINVAL;
2836 goto bad;
2837 }
2838 }
2839 #if CONFIG_PERSONAS
2840 /* copy in the persona info */
2841 if (px_args.persona_info_size != 0 && px_args.persona_info != 0) {
2842 /* for now, we need the exact same struct in user space */
2843 if (px_args.persona_info_size != sizeof(*px_persona)) {
2844 error = ERANGE;
2845 goto bad;
2846 }
2847
2848 MALLOC(px_persona, struct _posix_spawn_persona_info *, px_args.persona_info_size, M_TEMP, M_WAITOK | M_ZERO);
2849 if (px_persona == NULL) {
2850 error = ENOMEM;
2851 goto bad;
2852 }
2853 imgp->ip_px_persona = px_persona;
2854
2855 if ((error = copyin(px_args.persona_info, px_persona,
2856 px_args.persona_info_size)) != 0) {
2857 goto bad;
2858 }
2859 if ((error = spawn_validate_persona(px_persona)) != 0) {
2860 goto bad;
2861 }
2862 }
2863 #endif
2864 /* copy in the posix cred info */
2865 if (px_args.posix_cred_info_size != 0 && px_args.posix_cred_info != 0) {
2866 /* for now, we need the exact same struct in user space */
2867 if (px_args.posix_cred_info_size != sizeof(*px_pcred_info)) {
2868 error = ERANGE;
2869 goto bad;
2870 }
2871
2872 if (!kauth_cred_issuser(kauth_cred_get())) {
2873 error = EPERM;
2874 goto bad;
2875 }
2876
2877 MALLOC(px_pcred_info, struct _posix_spawn_posix_cred_info *,
2878 px_args.posix_cred_info_size, M_TEMP, M_WAITOK | M_ZERO);
2879 if (px_pcred_info == NULL) {
2880 error = ENOMEM;
2881 goto bad;
2882 }
2883 imgp->ip_px_pcred_info = px_pcred_info;
2884
2885 if ((error = copyin(px_args.posix_cred_info, px_pcred_info,
2886 px_args.posix_cred_info_size)) != 0) {
2887 goto bad;
2888 }
2889
2890 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GROUPS) {
2891 if (px_pcred_info->pspci_ngroups > NGROUPS_MAX) {
2892 error = EINVAL;
2893 goto bad;
2894 }
2895 }
2896 }
2897 #if CONFIG_MACF
2898 if (px_args.mac_extensions_size != 0) {
2899 if ((error = spawn_copyin_macpolicyinfo(&px_args, (_posix_spawn_mac_policy_extensions_t *)&imgp->ip_px_smpx)) != 0) {
2900 goto bad;
2901 }
2902 }
2903 #endif /* CONFIG_MACF */
2904 }
2905
2906 /* set uthread to parent */
2907 uthread = get_bsdthread_info(current_thread());
2908
2909 /*
2910 * <rdar://6640530>; this does not result in a behavior change
2911 * relative to Leopard, so there should not be any existing code
2912 * which depends on it.
2913 */
2914 if (uthread->uu_flag & UT_VFORK) {
2915 error = EINVAL;
2916 goto bad;
2917 }
2918
2919 if (imgp->ip_px_sa != NULL) {
2920 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
2921 if ((error = exec_validate_spawnattr_policy(psa->psa_apptype)) != 0) {
2922 goto bad;
2923 }
2924 }
2925
2926 /*
2927 * If we don't have the extension flag that turns "posix_spawn()"
2928 * into "execve() with options", then we will be creating a new
2929 * process which does not inherit memory from the parent process;
2930 * that inheritance is one of the most expensive things about using
2931 * fork() followed by execve().
2932 */
2933 if (imgp->ip_px_sa == NULL || !(px_sa.psa_flags & POSIX_SPAWN_SETEXEC)) {
2934 /* Set the new task's coalition, if it is requested. */
2935 coalition_t coal[COALITION_NUM_TYPES] = { COALITION_NULL };
2936 #if CONFIG_COALITIONS
2937 int i, ncoals;
2938 kern_return_t kr = KERN_SUCCESS;
2939 struct _posix_spawn_coalition_info coal_info;
2940 int coal_role[COALITION_NUM_TYPES];
2941
2942 if (imgp->ip_px_sa == NULL || !px_args.coal_info) {
2943 goto do_fork1;
2944 }
2945
2946 memset(&coal_info, 0, sizeof(coal_info));
2947
2948 if (px_args.coal_info_size > sizeof(coal_info)) {
2949 px_args.coal_info_size = sizeof(coal_info);
2950 }
2951 error = copyin(px_args.coal_info,
2952 &coal_info, px_args.coal_info_size);
2953 if (error != 0) {
2954 goto bad;
2955 }
2956
2957 ncoals = 0;
2958 for (i = 0; i < COALITION_NUM_TYPES; i++) {
2959 uint64_t cid = coal_info.psci_info[i].psci_id;
2960 if (cid != 0) {
2961 /*
2962 * don't allow tasks which are not in a
2963 * privileged coalition to spawn processes
2964 * into coalitions other than their own
2965 */
2966 if (!task_is_in_privileged_coalition(p->task, i)) {
2967 coal_dbg("ERROR: %d not in privilegd "
2968 "coalition of type %d",
2969 p->p_pid, i);
2970 spawn_coalitions_release_all(coal);
2971 error = EPERM;
2972 goto bad;
2973 }
2974
2975 coal_dbg("searching for coalition id:%llu", cid);
2976 /*
2977 * take a reference and activation on the
2978 * coalition to guard against free-while-spawn
2979 * races
2980 */
2981 coal[i] = coalition_find_and_activate_by_id(cid);
2982 if (coal[i] == COALITION_NULL) {
2983 coal_dbg("could not find coalition id:%llu "
2984 "(perhaps it has been terminated or reaped)", cid);
2985 /*
2986 * release any other coalition's we
2987 * may have a reference to
2988 */
2989 spawn_coalitions_release_all(coal);
2990 error = ESRCH;
2991 goto bad;
2992 }
2993 if (coalition_type(coal[i]) != i) {
2994 coal_dbg("coalition with id:%lld is not of type:%d"
2995 " (it's type:%d)", cid, i, coalition_type(coal[i]));
2996 error = ESRCH;
2997 goto bad;
2998 }
2999 coal_role[i] = coal_info.psci_info[i].psci_role;
3000 ncoals++;
3001 }
3002 }
3003 if (ncoals < COALITION_NUM_TYPES) {
3004 /*
3005 * If the user is attempting to spawn into a subset of
3006 * the known coalition types, then make sure they have
3007 * _at_least_ specified a resource coalition. If not,
3008 * the following fork1() call will implicitly force an
3009 * inheritance from 'p' and won't actually spawn the
3010 * new task into the coalitions the user specified.
3011 * (also the call to coalitions_set_roles will panic)
3012 */
3013 if (coal[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
3014 spawn_coalitions_release_all(coal);
3015 error = EINVAL;
3016 goto bad;
3017 }
3018 }
3019 do_fork1:
3020 #endif /* CONFIG_COALITIONS */
3021
3022 /*
3023 * note that this will implicitly inherit the
3024 * caller's persona (if it exists)
3025 */
3026 error = fork1(p, &imgp->ip_new_thread, PROC_CREATE_SPAWN, coal);
3027 /* returns a thread and task reference */
3028
3029 if (error == 0) {
3030 new_task = get_threadtask(imgp->ip_new_thread);
3031 }
3032 #if CONFIG_COALITIONS
3033 /* set the roles of this task within each given coalition */
3034 if (error == 0) {
3035 kr = coalitions_set_roles(coal, new_task, coal_role);
3036 if (kr != KERN_SUCCESS) {
3037 error = EINVAL;
3038 }
3039 if (kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_COALITION,
3040 MACH_COALITION_ADOPT))) {
3041 for (i = 0; i < COALITION_NUM_TYPES; i++) {
3042 if (coal[i] != COALITION_NULL) {
3043 /*
3044 * On 32-bit targets, uniqueid
3045 * will get truncated to 32 bits
3046 */
3047 KDBG_RELEASE(MACHDBG_CODE(
3048 DBG_MACH_COALITION,
3049 MACH_COALITION_ADOPT),
3050 coalition_id(coal[i]),
3051 get_task_uniqueid(new_task));
3052 }
3053 }
3054 }
3055 }
3056
3057 /* drop our references and activations - fork1() now holds them */
3058 spawn_coalitions_release_all(coal);
3059 #endif /* CONFIG_COALITIONS */
3060 if (error != 0) {
3061 goto bad;
3062 }
3063 imgp->ip_flags |= IMGPF_SPAWN; /* spawn w/o exec */
3064 spawn_no_exec = TRUE; /* used in later tests */
3065 } else {
3066 /*
3067 * For execve case, create a new task and thread
3068 * which points to current_proc. The current_proc will point
3069 * to the new task after image activation and proc ref drain.
3070 *
3071 * proc (current_proc) <----- old_task (current_task)
3072 * ^ | ^
3073 * | | |
3074 * | ----------------------------------
3075 * |
3076 * --------- new_task (task marked as TF_EXEC_COPY)
3077 *
3078 * After image activation, the proc will point to the new task
3079 * and would look like following.
3080 *
3081 * proc (current_proc) <----- old_task (current_task, marked as TPF_DID_EXEC)
3082 * ^ |
3083 * | |
3084 * | ----------> new_task
3085 * | |
3086 * -----------------
3087 *
3088 * During exec any transition from new_task -> proc is fine, but don't allow
3089 * transition from proc->task, since it will modify old_task.
3090 */
3091 imgp->ip_new_thread = fork_create_child(old_task,
3092 NULL,
3093 p,
3094 FALSE,
3095 p->p_flag & P_LP64,
3096 task_get_64bit_data(old_task),
3097 TRUE);
3098 /* task and thread ref returned by fork_create_child */
3099 if (imgp->ip_new_thread == NULL) {
3100 error = ENOMEM;
3101 goto bad;
3102 }
3103
3104 new_task = get_threadtask(imgp->ip_new_thread);
3105 imgp->ip_flags |= IMGPF_EXEC;
3106 }
3107
3108 if (spawn_no_exec) {
3109 p = (proc_t)get_bsdthreadtask_info(imgp->ip_new_thread);
3110
3111 /*
3112 * We had to wait until this point before firing the
3113 * proc:::create probe, otherwise p would not point to the
3114 * child process.
3115 */
3116 DTRACE_PROC1(create, proc_t, p);
3117 }
3118 assert(p != NULL);
3119
3120 context.vc_thread = imgp->ip_new_thread;
3121 context.vc_ucred = p->p_ucred; /* XXX must NOT be kauth_cred_get() */
3122
3123 /*
3124 * Post fdcopy(), pre exec_handle_sugid() - this is where we want
3125 * to handle the file_actions. Since vfork() also ends up setting
3126 * us into the parent process group, and saved off the signal flags,
3127 * this is also where we want to handle the spawn flags.
3128 */
3129
3130 /* Has spawn file actions? */
3131 if (imgp->ip_px_sfa != NULL) {
3132 /*
3133 * The POSIX_SPAWN_CLOEXEC_DEFAULT flag
3134 * is handled in exec_handle_file_actions().
3135 */
3136 if ((error = exec_handle_file_actions(imgp,
3137 imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0)) != 0) {
3138 goto bad;
3139 }
3140 }
3141
3142 /* Has spawn port actions? */
3143 if (imgp->ip_px_spa != NULL) {
3144 if ((error = exec_handle_port_actions(imgp, &port_actions)) != 0) {
3145 goto bad;
3146 }
3147 }
3148
3149 /* Has spawn attr? */
3150 if (imgp->ip_px_sa != NULL) {
3151 /*
3152 * Reset UID/GID to parent's RUID/RGID; this works only
3153 * because the operation occurs *after* the vfork() and
3154 * before the call to exec_handle_sugid() by the image
3155 * activator called from exec_activate_image(). POSIX
3156 * requires that any setuid/setgid bits on the process
3157 * image will take precedence over the spawn attributes
3158 * (re)setting them.
3159 *
3160 * Modifications to p_ucred must be guarded using the
3161 * proc's ucred lock. This prevents others from accessing
3162 * a garbage credential.
3163 */
3164 if (px_sa.psa_flags & POSIX_SPAWN_RESETIDS) {
3165 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred){
3166 return kauth_cred_setuidgid(my_cred,
3167 kauth_cred_getruid(my_cred),
3168 kauth_cred_getrgid(my_cred));
3169 });
3170 }
3171
3172 if (imgp->ip_px_pcred_info) {
3173 if (!spawn_no_exec) {
3174 error = ENOTSUP;
3175 goto bad;
3176 }
3177
3178 error = spawn_posix_cred_adopt(p, imgp->ip_px_pcred_info);
3179 if (error != 0) {
3180 goto bad;
3181 }
3182 }
3183
3184 #if CONFIG_PERSONAS
3185 if (imgp->ip_px_persona != NULL) {
3186 if (!spawn_no_exec) {
3187 error = ENOTSUP;
3188 goto bad;
3189 }
3190
3191 /*
3192 * If we were asked to spawn a process into a new persona,
3193 * do the credential switch now (which may override the UID/GID
3194 * inherit done just above). It's important to do this switch
3195 * before image activation both for reasons stated above, and
3196 * to ensure that the new persona has access to the image/file
3197 * being executed.
3198 */
3199 error = spawn_persona_adopt(p, imgp->ip_px_persona);
3200 if (error != 0) {
3201 goto bad;
3202 }
3203 }
3204 #endif /* CONFIG_PERSONAS */
3205 #if !SECURE_KERNEL
3206 /*
3207 * Disable ASLR for the spawned process.
3208 *
3209 * But only do so if we are not embedded + RELEASE.
3210 * While embedded allows for a boot-arg (-disable_aslr)
3211 * to deal with this (which itself is only honored on
3212 * DEVELOPMENT or DEBUG builds of xnu), it is often
3213 * useful or necessary to disable ASLR on a per-process
3214 * basis for unit testing and debugging.
3215 */
3216 if (px_sa.psa_flags & _POSIX_SPAWN_DISABLE_ASLR) {
3217 OSBitOrAtomic(P_DISABLE_ASLR, &p->p_flag);
3218 }
3219 #endif /* !SECURE_KERNEL */
3220
3221 /* Randomize high bits of ASLR slide */
3222 if (px_sa.psa_flags & _POSIX_SPAWN_HIGH_BITS_ASLR) {
3223 imgp->ip_flags |= IMGPF_HIGH_BITS_ASLR;
3224 }
3225
3226 #if !SECURE_KERNEL
3227 /*
3228 * Forcibly allow execution from data pages for the spawned process
3229 * even if it would otherwise be disallowed by the architecture default.
3230 */
3231 if (px_sa.psa_flags & _POSIX_SPAWN_ALLOW_DATA_EXEC) {
3232 imgp->ip_flags |= IMGPF_ALLOW_DATA_EXEC;
3233 }
3234 #endif /* !SECURE_KERNEL */
3235
3236 if ((px_sa.psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) ==
3237 POSIX_SPAWN_PROC_TYPE_DRIVER) {
3238 imgp->ip_flags |= IMGPF_DRIVER;
3239 }
3240 }
3241
3242 /*
3243 * Disable ASLR during image activation. This occurs either if the
3244 * _POSIX_SPAWN_DISABLE_ASLR attribute was found above or if
3245 * P_DISABLE_ASLR was inherited from the parent process.
3246 */
3247 if (p->p_flag & P_DISABLE_ASLR) {
3248 imgp->ip_flags |= IMGPF_DISABLE_ASLR;
3249 }
3250
3251 /*
3252 * Clear transition flag so we won't hang if exec_activate_image() causes
3253 * an automount (and launchd does a proc sysctl to service it).
3254 *
3255 * <rdar://problem/6848672>, <rdar://problem/5959568>.
3256 */
3257 if (spawn_no_exec) {
3258 proc_transend(p, 0);
3259 proc_transit_set = FALSE;
3260 }
3261
3262 #if MAC_SPAWN /* XXX */
3263 if (uap->mac_p != USER_ADDR_NULL) {
3264 error = mac_execve_enter(uap->mac_p, imgp);
3265 if (error) {
3266 goto bad;
3267 }
3268 }
3269 #endif
3270
3271 /*
3272 * Activate the image
3273 */
3274 error = exec_activate_image(imgp);
3275 #if defined(HAS_APPLE_PAC)
3276 ml_task_set_disable_user_jop(new_task, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
3277 ml_thread_set_disable_user_jop(imgp->ip_new_thread, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
3278 #endif
3279
3280 if (error == 0 && !spawn_no_exec) {
3281 p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread);
3282 /* proc ref returned */
3283 should_release_proc_ref = TRUE;
3284
3285 /*
3286 * Need to transfer pending watch port boosts to the new task while still making
3287 * sure that the old task remains in the importance linkage. Create an importance
3288 * linkage from old task to new task, then switch the task importance base
3289 * of old task and new task. After the switch the port watch boost will be
3290 * boosting the new task and new task will be donating importance to old task.
3291 */
3292 inherit = ipc_importance_exec_switch_task(old_task, new_task);
3293 }
3294
3295 if (error == 0) {
3296 /* process completed the exec */
3297 exec_done = TRUE;
3298 } else if (error == -1) {
3299 /* Image not claimed by any activator? */
3300 error = ENOEXEC;
3301 }
3302
3303 if (!error && imgp->ip_px_sa != NULL) {
3304 thread_t child_thread = imgp->ip_new_thread;
3305 uthread_t child_uthread = get_bsdthread_info(child_thread);
3306
3307 /*
3308 * Because of POSIX_SPAWN_SETEXEC, we need to handle this after image
3309 * activation; otherwise a failed image activation (before the point of
3310 * no return) would leave the parent process in a modified state.
3311 */
3312 if (px_sa.psa_flags & POSIX_SPAWN_SETPGROUP) {
3313 struct setpgid_args spga;
3314 spga.pid = p->p_pid;
3315 spga.pgid = px_sa.psa_pgroup;
3316 /*
3317 * Effectively, call setpgid() system call; works
3318 * because there are no pointer arguments.
3319 */
3320 if ((error = setpgid(p, &spga, ival)) != 0) {
3321 goto bad;
3322 }
3323 }
3324
3325 if (px_sa.psa_flags & POSIX_SPAWN_SETSID) {
3326 error = setsid_internal(p);
3327 if (error != 0) {
3328 goto bad;
3329 }
3330 }
3331
3332 /*
3333 * If we have a spawn attr, and it contains signal-related flags,
3334 * then we need to process them in the "context" of the new child
3335 * process, so we have to process it following image activation,
3336 * prior to making the thread runnable in user space. This is
3337 * necessitated by some signal information being per-thread rather
3338 * than per-process, and we don't have the new allocation in hand
3339 * until after the image is activated.
3340 */
3341
3342 /*
3343 * Mask the listed signals in the child, even if they were
3344 * unmasked in the parent; note that some signals
3345 * are not maskable.
3346 */
3347 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGMASK) {
3348 child_uthread->uu_sigmask = (px_sa.psa_sigmask & ~sigcantmask);
3349 }
3350 /*
3351 * Default a list of signals instead of ignoring them, if
3352 * they were ignored in the parent. Note that we pass
3353 * spawn_no_exec to setsigvec() to indicate that we called
3354 * fork1() and therefore do not need to call proc_signalstart()
3355 * internally.
3356 */
3357 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGDEF) {
3358 vec.sa_handler = SIG_DFL;
3359 vec.sa_tramp = 0;
3360 vec.sa_mask = 0;
3361 vec.sa_flags = 0;
3362 for (sig = 1; sig < NSIG; sig++) {
3363 if (px_sa.psa_sigdefault & (1 << (sig - 1))) {
3364 error = setsigvec(p, child_thread, sig, &vec, spawn_no_exec);
3365 }
3366 }
3367 }
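/*
 * Userspace counterpart for the two signal attributes handled
 * above, sketched (illustrative only):
 *
 *	sigset_t def, mask;
 *	sigemptyset(&def);
 *	sigaddset(&def, SIGPIPE);	// reset to SIG_DFL in the child
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);	// start blocked in the child
 *	posix_spawnattr_setsigdefault(&attr, &def);
 *	posix_spawnattr_setsigmask(&attr, &mask);
 *	posix_spawnattr_setflags(&attr,
 *	    POSIX_SPAWN_SETSIGDEF | POSIX_SPAWN_SETSIGMASK);
 */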
3368
3369 /*
3370 * Activate the CPU usage monitor, if requested. This is done via a task-wide, per-thread CPU
3371 * usage limit, which will generate a resource exceeded exception if any one thread exceeds the
3372 * limit.
3373 *
3374 * Userland gives us interval in seconds, and the kernel SPI expects nanoseconds.
3375 */
3376 if (px_sa.psa_cpumonitor_percent != 0) {
3377 /*
3378 * Always treat a CPU monitor activation coming from spawn as entitled. Requiring
3379 * an entitlement to configure the monitor a certain way seems silly, since
3380 * whoever is turning it on could just as easily choose not to do so.
3381 */
3382 error = proc_set_task_ruse_cpu(p->task,
3383 TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
3384 px_sa.psa_cpumonitor_percent,
3385 px_sa.psa_cpumonitor_interval * NSEC_PER_SEC,
3386 0, TRUE);
3387 }
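/*
 * Userspace sketch, assuming the private posix_spawnattr_setcpumonitor()
 * SPI (illustrative only): request a notify-only resource exception when
 * any thread exceeds 50% CPU over a 180 second window; the interval is
 * converted to nanoseconds above.
 *
 *	posix_spawnattr_setcpumonitor(&attr, 50, 180);
 */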
3388
3389
3390 if (px_pcred_info &&
3391 (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_LOGIN)) {
3392 /*
3393 * setlogin() must happen after setsid()
3394 */
3395 setlogin_internal(p, px_pcred_info->pspci_login);
3396 }
3397 }
3398
3399 bad:
3400
3401 if (error == 0) {
3402 /* reset delay idle sleep status if set */
3403 #if !CONFIG_EMBEDDED
3404 if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) {
3405 OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &p->p_flag);
3406 }
3407 #endif /* !CONFIG_EMBEDDED */
3408 /* upon successful spawn, re/set the proc control state */
3409 if (imgp->ip_px_sa != NULL) {
3410 switch (px_sa.psa_pcontrol) {
3411 case POSIX_SPAWN_PCONTROL_THROTTLE:
3412 p->p_pcaction = P_PCTHROTTLE;
3413 break;
3414 case POSIX_SPAWN_PCONTROL_SUSPEND:
3415 p->p_pcaction = P_PCSUSP;
3416 break;
3417 case POSIX_SPAWN_PCONTROL_KILL:
3418 p->p_pcaction = P_PCKILL;
3419 break;
3420 case POSIX_SPAWN_PCONTROL_NONE:
3421 default:
3422 p->p_pcaction = 0;
3423 break;
3424 }
3426 }
3427 exec_resettextvp(p, imgp);
3428
3429 #if CONFIG_MEMORYSTATUS
3430 /* Set jetsam priority for DriverKit processes */
3431 if (imgp->ip_px_sa != NULL && px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DRIVER) {
3432 px_sa.psa_priority = JETSAM_PRIORITY_DRIVER_APPLE;
3433 }
3434
3435 /* Has jetsam attributes? */
3436 if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_SET)) {
3437 /*
3438 * With 2-level high-water-mark support, POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is no
3439 * longer relevant, as background limits are described via the inactive limit slots.
3440 *
3441 * That said, however, if the POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is passed in,
3442 * we attempt to mimic previous behavior by forcing the BG limit data into the
3443 * inactive/non-fatal mode and force the active slots to hold system_wide/fatal mode.
3444 */
3445
3446 if (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND) {
3447 memorystatus_update(p, px_sa.psa_priority, 0, FALSE, /* assertion priority */
3448 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY),
3449 TRUE,
3450 -1, TRUE,
3451 px_sa.psa_memlimit_inactive, FALSE);
3452 } else {
3453 memorystatus_update(p, px_sa.psa_priority, 0, FALSE, /* assertion priority */
3454 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY),
3455 TRUE,
3456 px_sa.psa_memlimit_active,
3457 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL),
3458 px_sa.psa_memlimit_inactive,
3459 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL));
3460 }
3461 }
3462
3463 /* Has jetsam relaunch behavior? */
3464 if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MASK)) {
3465 /*
3466 * Launchd has passed in data indicating the behavior of this process in response to jetsam.
3467 * This data would be used by the jetsam subsystem to determine the position and protection
3468 * offered to this process on dirty -> clean transitions.
3469 */
3470 int relaunch_flags = P_MEMSTAT_RELAUNCH_UNKNOWN;
3471 switch (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MASK) {
3472 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_LOW:
3473 relaunch_flags = P_MEMSTAT_RELAUNCH_LOW;
3474 break;
3475 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MED:
3476 relaunch_flags = P_MEMSTAT_RELAUNCH_MED;
3477 break;
3478 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_HIGH:
3479 relaunch_flags = P_MEMSTAT_RELAUNCH_HIGH;
3480 break;
3481 default:
3482 break;
3483 }
3484 memorystatus_relaunch_flags_update(p, relaunch_flags);
3485 }
3486
3487 #endif /* CONFIG_MEMORYSTATUS */
3488 if (imgp->ip_px_sa != NULL && px_sa.psa_thread_limit > 0) {
3489 task_set_thread_limit(new_task, (uint16_t)px_sa.psa_thread_limit);
3490 }
3491 }
3492
3493 /*
3494 * If we successfully called fork1(), we always need to do this;
3495 * we identify this case by noting the IMGPF_SPAWN flag. This is
3496 * because we come back from that call with signals blocked in the
3497 * child, and we have to unblock them, but we want to wait until
3498 * after we've performed any spawn actions. This has to happen
3499 * before check_for_signature(), which uses psignal.
3500 */
3501 if (spawn_no_exec) {
3502 if (proc_transit_set) {
3503 proc_transend(p, 0);
3504 }
3505
3506 /*
3507 * Drop the signal lock on the child which was taken on our
3508 * behalf by forkproc()/cloneproc() to prevent signals being
3509 * received by the child in a partially constructed state.
3510 */
3511 proc_signalend(p, 0);
3512
3513 /* flag the 'fork' has occurred */
3514 proc_knote(p->p_pptr, NOTE_FORK | p->p_pid);
3515 }
3516
3517 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
3518 if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
3519 proc_knote(p, NOTE_EXEC);
3520 }
3521
3522
3523 if (error == 0) {
3524 /*
3525 * We need to initialize the bank context behind the protection of
3526 * the proc_trans lock to prevent a race with exit. We can't do this during
3527 * exec_activate_image because task_bank_init checks entitlements that
3528 * aren't loaded until subsequent calls (including exec_resettextvp).
3529 */
3530 error = proc_transstart(p, 0, 0);
3531
3532 if (error == 0) {
3533 task_bank_init(new_task);
3534 proc_transend(p, 0);
3535 }
3536
3537 #if __arm64__
3538 proc_legacy_footprint_entitled(p, new_task, __FUNCTION__);
3539 #endif /* __arm64__ */
3540 }
3541
3542 /* Inherit task role from old task to new task for exec */
3543 if (error == 0 && !spawn_no_exec) {
3544 proc_inherit_task_role(new_task, old_task);
3545 }
3546
3547 #if CONFIG_ARCADE
3548 if (error == 0) {
3549 /*
3550 * Check to see if we need to trigger an arcade upcall AST now
3551 * that the vnode has been reset on the task.
3552 */
3553 arcade_prepare(new_task, imgp->ip_new_thread);
3554 }
3555 #endif /* CONFIG_ARCADE */
3556
3557 /* Clear the initial wait on the thread before handling spawn policy */
3558 if (imgp && imgp->ip_new_thread) {
3559 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_INITIAL_WAIT);
3560 }
3561
3562 /*
3563 * Apply the spawnattr policy, apptype (which primes the task for importance donation),
3564 * and bind any portwatch ports to the new task.
3565 * This must be done after the exec so that the child's thread is ready,
3566 * and after the in-transit state has been released, because priority is
3567 * dropped here and we need to be prepared for a potentially long preemption interval.
3568 *
3569 * TODO: Consider splitting this up into separate phases
3570 */
3571 if (error == 0 && imgp->ip_px_sa != NULL) {
3572 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
3573
3574 error = exec_handle_spawnattr_policy(p, imgp->ip_new_thread, psa->psa_apptype, psa->psa_qos_clamp,
3575 psa->psa_darwin_role, &port_actions);
3576 }
3577
3578 /* Transfer the turnstile watchport boost to new task if in exec */
3579 if (error == 0 && !spawn_no_exec) {
3580 task_transfer_turnstile_watchports(old_task, new_task, imgp->ip_new_thread);
3581 }
3582
3583 /*
3584 * Apply the requested maximum address.
3585 */
3586 if (error == 0 && imgp->ip_px_sa != NULL) {
3587 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
3588
3589 if (psa->psa_max_addr) {
3590 vm_map_set_max_addr(get_task_map(new_task), psa->psa_max_addr);
3591 }
3592 }
3593
3594 if (error == 0) {
3595 /* Apply the main thread qos */
3596 thread_t main_thread = imgp->ip_new_thread;
3597 task_set_main_thread_qos(new_task, main_thread);
3598
3599 #if CONFIG_MACF
3600 /*
3601 * Processes with the MAP_JIT entitlement are permitted to have
3602 * a jumbo-size map.
3603 */
3604 if (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0) {
3605 vm_map_set_jumbo(get_task_map(new_task));
3606 vm_map_set_jit_entitled(get_task_map(new_task));
3607 }
3608 #endif /* CONFIG_MACF */
3609 }
3610
3611 /*
3612 * Release any ports we kept around for binding to the new task.
3613 * We need to release the rights even if the posix_spawn has failed.
3614 */
3615 if (imgp->ip_px_spa != NULL) {
3616 exec_port_actions_destroy(&port_actions);
3617 }
3618
3619 /*
3620 * We have to delay operations which might throw a signal until after
3621 * the signals have been unblocked; however, we want that to happen
3622 * after exec_resettextvp() so that the textvp is correct when they
3623 * fire.
3624 */
3625 if (error == 0) {
3626 error = check_for_signature(p, imgp);
3627
3628 /*
3629 * Pay for our earlier safety; deliver the delayed signals from
3630 * the incomplete spawn process now that it's complete.
3631 */
3632 if (imgp != NULL && spawn_no_exec && (p->p_lflag & P_LTRACED)) {
3633 psignal_vfork(p, p->task, imgp->ip_new_thread, SIGTRAP);
3634 }
3635
3636 if (error == 0 && !spawn_no_exec) {
3637 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
3638 p->p_pid);
3639 }
3640 }
3641
3642
3643 if (imgp != NULL) {
3644 if (imgp->ip_vp) {
3645 vnode_put(imgp->ip_vp);
3646 }
3647 if (imgp->ip_scriptvp) {
3648 vnode_put(imgp->ip_scriptvp);
3649 }
3650 if (imgp->ip_strings) {
3651 execargs_free(imgp);
3652 }
3653 if (imgp->ip_px_sfa != NULL) {
3654 FREE(imgp->ip_px_sfa, M_TEMP);
3655 }
3656 if (imgp->ip_px_spa != NULL) {
3657 FREE(imgp->ip_px_spa, M_TEMP);
3658 }
3659 #if CONFIG_PERSONAS
3660 if (imgp->ip_px_persona != NULL) {
3661 FREE(imgp->ip_px_persona, M_TEMP);
3662 }
3663 #endif
3664 if (imgp->ip_px_pcred_info != NULL) {
3665 FREE(imgp->ip_px_pcred_info, M_TEMP);
3666 }
3667 #if CONFIG_MACF
3668 if (imgp->ip_px_smpx != NULL) {
3669 spawn_free_macpolicyinfo(imgp->ip_px_smpx);
3670 }
3671 if (imgp->ip_execlabelp) {
3672 mac_cred_label_free(imgp->ip_execlabelp);
3673 }
3674 if (imgp->ip_scriptlabelp) {
3675 mac_vnode_label_free(imgp->ip_scriptlabelp);
3676 }
3677 if (imgp->ip_cs_error != OS_REASON_NULL) {
3678 os_reason_free(imgp->ip_cs_error);
3679 imgp->ip_cs_error = OS_REASON_NULL;
3680 }
3681 #endif
3682 }
3683
3684 #if CONFIG_DTRACE
3685 if (spawn_no_exec) {
3686 /*
3687 * In the original DTrace reference implementation,
3688 * posix_spawn() was a libc routine that just
3689 * did vfork(2) then exec(2). Thus the proc::: probes
3690 * are very fork/exec oriented. The details of this
3691 * in-kernel implementation of posix_spawn() are different
3692 * (while producing the same process-observable effects),
3693 * particularly w.r.t. errors, and which thread/process
3694 * is constructing what on behalf of whom.
3695 */
3696 if (error) {
3697 DTRACE_PROC1(spawn__failure, int, error);
3698 } else {
3699 DTRACE_PROC(spawn__success);
3700 /*
3701 * Some DTrace scripts, e.g. newproc.d in
3702 * /usr/bin, rely on the 'exec-success'
3703 * probe being fired in the child after the
3704 * new process image has been constructed
3705 * in order to determine the associated pid.
3706 *
3707 * So, even though the parent built the image
3708 * here, for compatibility, mark the new thread
3709 * so 'exec-success' fires on it as it leaves
3710 * the kernel.
3711 */
3712 dtrace_thread_didexec(imgp->ip_new_thread);
3713 }
3714 } else {
3715 if (error) {
3716 DTRACE_PROC1(exec__failure, int, error);
3717 } else {
3718 dtrace_thread_didexec(imgp->ip_new_thread);
3719 }
3720 }
3721
3722 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
3723 (*dtrace_proc_waitfor_hook)(p);
3724 }
3725 #endif
3726
3727 #if CONFIG_AUDIT
3728 if (!error && AUDIT_ENABLED() && p) {
3729 /* Add the CDHash of the new process to the audit record */
3730 uint8_t *cdhash = cs_get_cdhash(p);
3731 if (cdhash) {
3732 AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN);
3733 }
3734 }
3735 #endif
3736
3737 /*
3738 * clear bsd_info from old task if it did exec.
3739 */
3740 if (task_did_exec(old_task)) {
3741 set_bsdtask_info(old_task, NULL);
3742 }
3743
3744 /* clear bsd_info from new task and terminate it if exec failed */
3745 if (new_task != NULL && task_is_exec_copy(new_task)) {
3746 set_bsdtask_info(new_task, NULL);
3747 task_terminate_internal(new_task);
3748 }
3749
3750 /* Return to both the parent and the child? */
3751 if (imgp != NULL && spawn_no_exec) {
3752 /*
3753 * If the parent wants the pid, copy it out
3754 */
3755 if (pid != USER_ADDR_NULL) {
3756 _Static_assert(sizeof(p->p_pid) == 4, "posix_spawn() assumes a 32-bit pid_t");
3757 bool aligned = (pid & 3) == 0;
3758 if (aligned) {
3759 (void)copyout_atomic32(p->p_pid, pid);
3760 } else {
3761 (void)suword(pid, p->p_pid);
3762 }
3763 }
3764 retval[0] = error;
3765
3766 /*
3767 * If we had an error, perform an internal reap; this is
3768 * entirely safe, as we have a real process backing us.
3769 */
3770 if (error) {
3771 proc_list_lock();
3772 p->p_listflag |= P_LIST_DEADPARENT;
3773 proc_list_unlock();
3774 proc_lock(p);
3775 /* make sure no one else has killed it off... */
3776 if (p->p_stat != SZOMB && p->exit_thread == NULL) {
3777 p->exit_thread = current_thread();
3778 proc_unlock(p);
3779 exit1(p, 1, (int *)NULL);
3780 } else {
3781 /* someone is doing it for us; just skip it */
3782 proc_unlock(p);
3783 }
3784 }
3785 }
3786
3787 /*
3788 * Do not terminate the current task if proc_exec_switch_task did not
3789 * switch the tasks; terminating the current task without the switch would
3790 * result in losing the SIGKILL status.
3791 */
3792 if (task_did_exec(old_task)) {
3793 /* Terminate the current task, since exec will start in new task */
3794 task_terminate_internal(old_task);
3795 }
3796
3797 /* Release the thread ref returned by fork_create_child/fork1 */
3798 if (imgp != NULL && imgp->ip_new_thread) {
3799 /* wake up the new thread */
3800 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_FINAL_WAIT);
3801 thread_deallocate(imgp->ip_new_thread);
3802 imgp->ip_new_thread = NULL;
3803 }
3804
3805 /* Release the ref returned by fork_create_child/fork1 */
3806 if (new_task) {
3807 task_deallocate(new_task);
3808 new_task = NULL;
3809 }
3810
3811 if (should_release_proc_ref) {
3812 proc_rele(p);
3813 }
3814
3815 if (bufp != NULL) {
3816 FREE(bufp, M_TEMP);
3817 }
3818
3819 if (inherit != NULL) {
3820 ipc_importance_release(inherit);
3821 }
3822
3823 return error;
3824 }
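/*
 * A minimal userland sketch (not part of this translation unit) of the
 * contract implemented above: posix_spawn() reports failure through its
 * return value rather than errno, and the child pid is copied out to the
 * caller-supplied pid_t, which need not be naturally aligned. The helper
 * name spawn_and_wait() is illustrative only.
 */
#include <spawn.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>

extern char **environ;

int
spawn_and_wait(const char *path)
{
	pid_t child;
	char *argv[] = { (char *)path, NULL };

	/* On success, 'child' holds the pid the kernel copied out. */
	int err = posix_spawn(&child, path, NULL, NULL, argv, environ);
	if (err != 0) {
		fprintf(stderr, "posix_spawn: %s\n", strerror(err));
		return err;
	}

	int status;
	(void)waitpid(child, &status, 0);
	return 0;
}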
3825
3826 /*
3827 * proc_exec_switch_task
3828 *
3829 * Parameters: p proc
3830 * old_task task before exec
3831 * new_task task after exec
3832 * new_thread thread in new task
3833 *
3834 * Returns: proc.
3835 *
3836 * Note: The function will switch the task pointer of proc
3837 * from old task to new task. The switch needs to happen
3838 * after draining all proc refs and inside a proc translock.
3839 * In the case of a failure to switch the task, which might happen
3840 * if the process received a SIGKILL or jetsam killed it, it will make
3841 * sure that the new task terminates. A user proc ref is returned
3842 * to the caller.
3843 *
3844 * This function is called after the point of no return; in the case
3845 * of a failure to switch, it terminates the new task, swallows the
3846 * error, and lets the terminated process complete exec and die.
3847 */
3848 proc_t
3849 proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread)
3850 {
3851 int error = 0;
3852 boolean_t task_active;
3853 boolean_t proc_active;
3854 boolean_t thread_active;
3855 thread_t old_thread = current_thread();
3856
3857 /*
3858 * Switch the task pointer of proc to new task.
3859 * Before switching the task, wait for proc_refdrain.
3860 * After the switch happens, the proc can disappear,
3861 * so take a ref before it does. Waiting for
3862 * proc_refdrain in exec will block all other threads
3863 * trying to take a proc ref, so boost the current thread
3864 * to avoid a priority inversion.
3865 */
3866 thread_set_exec_promotion(old_thread);
3867 p = proc_refdrain_with_refwait(p, TRUE);
3868 /* extra proc ref returned to the caller */
3869
3870 assert(get_threadtask(new_thread) == new_task);
3871 task_active = task_is_active(new_task);
3872
3873 /* Take the proc_translock to change the task ptr */
3874 proc_lock(p);
3875 proc_active = !(p->p_lflag & P_LEXIT);
3876
3877 /* Check if the current thread is not aborted due to SIGKILL */
3878 thread_active = thread_is_active(old_thread);
3879
3880 /*
3881 * Do not switch the task if the new task or proc is already terminated
3882 * as a result of error in exec past point of no return
3883 */
3884 if (proc_active && task_active && thread_active) {
3885 error = proc_transstart(p, 1, 0);
3886 if (error == 0) {
3887 uthread_t new_uthread = get_bsdthread_info(new_thread);
3888 uthread_t old_uthread = get_bsdthread_info(current_thread());
3889
3890 /*
3891 * bsd_info of old_task will get cleared in execve and posix_spawn
3892 * after firing exec-success/error dtrace probe.
3893 */
3894 p->task = new_task;
3895
3896 /* Clear dispatchqueue and workloop ast offset */
3897 p->p_dispatchqueue_offset = 0;
3898 p->p_dispatchqueue_serialno_offset = 0;
3899 p->p_dispatchqueue_label_offset = 0;
3900 p->p_return_to_kernel_offset = 0;
3901
3902 /* Copy the signal state, dtrace state and set bsd ast on new thread */
3903 act_set_astbsd(new_thread);
3904 new_uthread->uu_siglist = old_uthread->uu_siglist;
3905 new_uthread->uu_sigwait = old_uthread->uu_sigwait;
3906 new_uthread->uu_sigmask = old_uthread->uu_sigmask;
3907 new_uthread->uu_oldmask = old_uthread->uu_oldmask;
3908 new_uthread->uu_vforkmask = old_uthread->uu_vforkmask;
3909 new_uthread->uu_exit_reason = old_uthread->uu_exit_reason;
3910 #if CONFIG_DTRACE
3911 new_uthread->t_dtrace_sig = old_uthread->t_dtrace_sig;
3912 new_uthread->t_dtrace_stop = old_uthread->t_dtrace_stop;
3913 new_uthread->t_dtrace_resumepid = old_uthread->t_dtrace_resumepid;
3914 assert(new_uthread->t_dtrace_scratch == NULL);
3915 new_uthread->t_dtrace_scratch = old_uthread->t_dtrace_scratch;
3916
3917 old_uthread->t_dtrace_sig = 0;
3918 old_uthread->t_dtrace_stop = 0;
3919 old_uthread->t_dtrace_resumepid = 0;
3920 old_uthread->t_dtrace_scratch = NULL;
3921 #endif
3922 /* Copy the resource accounting info */
3923 thread_copy_resource_info(new_thread, current_thread());
3924
3925 /* Clear the exit reason and signal state on old thread */
3926 old_uthread->uu_exit_reason = NULL;
3927 old_uthread->uu_siglist = 0;
3928
3929 /* Add the new uthread to proc uthlist and remove the old one */
3930 TAILQ_INSERT_TAIL(&p->p_uthlist, new_uthread, uu_list);
3931 TAILQ_REMOVE(&p->p_uthlist, old_uthread, uu_list);
3932
3933 task_set_did_exec_flag(old_task);
3934 task_clear_exec_copy_flag(new_task);
3935
3936 task_copy_fields_for_exec(new_task, old_task);
3937
3938 proc_transend(p, 1);
3939 }
3940 }
3941
3942 proc_unlock(p);
3943 proc_refwake(p);
3944 thread_clear_exec_promotion(old_thread);
3945
3946 if (error != 0 || !task_active || !proc_active || !thread_active) {
3947 task_terminate_internal(new_task);
3948 }
3949
3950 return p;
3951 }
3952
3953 /*
3954 * execve
3955 *
3956 * Parameters: uap->fname File name to exec
3957 * uap->argp Argument list
3958 * uap->envp Environment list
3959 *
3960 * Returns: 0 Success
3961 * __mac_execve:EINVAL Invalid argument
3962 * __mac_execve:ENOTSUP Not supported
3963 * __mac_execve:EACCES Permission denied
3964 * __mac_execve:EINTR Interrupted function
3965 * __mac_execve:ENOMEM Not enough space
3966 * __mac_execve:EFAULT Bad address
3967 * __mac_execve:ENAMETOOLONG Filename too long
3968 * __mac_execve:ENOEXEC Executable file format error
3969 * __mac_execve:ETXTBSY Text file busy [misuse of error code]
3970 * __mac_execve:???
3971 *
3972 * TODO: Dynamic linker header address on stack is copied via suword()
3973 */
3974 /* ARGSUSED */
3975 int
3976 execve(proc_t p, struct execve_args *uap, int32_t *retval)
3977 {
3978 struct __mac_execve_args muap;
3979 int err;
3980
3981 memoryshot(VM_EXECVE, DBG_FUNC_NONE);
3982
3983 muap.fname = uap->fname;
3984 muap.argp = uap->argp;
3985 muap.envp = uap->envp;
3986 muap.mac_p = USER_ADDR_NULL;
3987 err = __mac_execve(p, &muap, retval);
3988
3989 return err;
3990 }
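/*
 * A minimal userland sketch (not part of this translation unit) of the
 * execve(2) entry point above: on success execve() never returns, so any
 * return indicates failure and the error is reported via errno. The
 * helper name run_ls() is illustrative only.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
run_ls(void)
{
	char *argv[] = { "ls", "-l", NULL };
	char *envp[] = { "PATH=/bin:/usr/bin", NULL };

	execve("/bin/ls", argv, envp);

	/* Reached only on failure. */
	fprintf(stderr, "execve: %s\n", strerror(errno));
	return errno;
}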
3991
3992 /*
3993 * __mac_execve
3994 *
3995 * Parameters: uap->fname File name to exec
3996 * uap->argp Argument list
3997 * uap->envp Environment list
3998 * uap->mac_p MAC label supplied by caller
3999 *
4000 * Returns: 0 Success
4001 * EINVAL Invalid argument
4002 * ENOTSUP Not supported
4003 * ENOEXEC Executable file format error
4004 * exec_activate_image:EINVAL Invalid argument
4005 * exec_activate_image:EACCES Permission denied
4006 * exec_activate_image:EINTR Interrupted function
4007 * exec_activate_image:ENOMEM Not enough space
4008 * exec_activate_image:EFAULT Bad address
4009 * exec_activate_image:ENAMETOOLONG Filename too long
4010 * exec_activate_image:ENOEXEC Executable file format error
4011 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
4012 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
4013 * exec_activate_image:???
4014 * mac_execve_enter:???
4015 *
4016 * TODO: Dynamic linker header address on stack is copied via suword()
4017 */
4018 int
4019 __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval)
4020 {
4021 char *bufp = NULL;
4022 struct image_params *imgp;
4023 struct vnode_attr *vap;
4024 struct vnode_attr *origvap;
4025 int error;
4026 int is_64 = IS_64BIT_PROCESS(p);
4027 struct vfs_context context;
4028 struct uthread *uthread;
4029 task_t old_task = current_task();
4030 task_t new_task = NULL;
4031 boolean_t should_release_proc_ref = FALSE;
4032 boolean_t exec_done = FALSE;
4033 boolean_t in_vfexec = FALSE;
4034 void *inherit = NULL;
4035
4036 context.vc_thread = current_thread();
4037 context.vc_ucred = kauth_cred_proc_ref(p); /* XXX must NOT be kauth_cred_get() */
4038
4039 /* Allocate a big chunk for locals instead of using the stack, since these
4040 * structures are pretty big.
4041 */
4042 MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO);
4043 imgp = (struct image_params *) bufp;
4044 if (bufp == NULL) {
4045 error = ENOMEM;
4046 goto exit_with_error;
4047 }
4048 vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
4049 origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));
4050
4051 /* Initialize the common data in the image_params structure */
4052 imgp->ip_user_fname = uap->fname;
4053 imgp->ip_user_argv = uap->argp;
4054 imgp->ip_user_envv = uap->envp;
4055 imgp->ip_vattr = vap;
4056 imgp->ip_origvattr = origvap;
4057 imgp->ip_vfs_context = &context;
4058 imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE) | ((p->p_flag & P_DISABLE_ASLR) ? IMGPF_DISABLE_ASLR : IMGPF_NONE);
4059 imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
4060 imgp->ip_mac_return = 0;
4061 imgp->ip_cs_error = OS_REASON_NULL;
4062 imgp->ip_simulator_binary = IMGPF_SB_DEFAULT;
4063
4064 #if CONFIG_MACF
4065 if (uap->mac_p != USER_ADDR_NULL) {
4066 error = mac_execve_enter(uap->mac_p, imgp);
4067 if (error) {
4068 kauth_cred_unref(&context.vc_ucred);
4069 goto exit_with_error;
4070 }
4071 }
4072 #endif
4073 uthread = get_bsdthread_info(current_thread());
4074 if (uthread->uu_flag & UT_VFORK) {
4075 imgp->ip_flags |= IMGPF_VFORK_EXEC;
4076 in_vfexec = TRUE;
4077 } else {
4078 imgp->ip_flags |= IMGPF_EXEC;
4079
4080 /*
4081 * For execve case, create a new task and thread
4082 * which points to current_proc. The current_proc will point
4083 * to the new task after image activation and proc ref drain.
4084 *
4085 * proc (current_proc) <----- old_task (current_task)
4086 * ^ | ^
4087 * | | |
4088 * | ----------------------------------
4089 * |
4090 * --------- new_task (task marked as TF_EXEC_COPY)
4091 *
4092 * After image activation, the proc will point to the new task
4093 * and would look like following.
4094 *
4095 * proc (current_proc) <----- old_task (current_task, marked as TPF_DID_EXEC)
4096 * ^ |
4097 * | |
4098 * | ----------> new_task
4099 * | |
4100 * -----------------
4101 *
4102 * During exec any transition from new_task -> proc is fine, but don't allow
4103 * transition from proc -> task, since it will modify old_task.
4104 */
4105 imgp->ip_new_thread = fork_create_child(old_task,
4106 NULL,
4107 p,
4108 FALSE,
4109 p->p_flag & P_LP64,
4110 task_get_64bit_data(old_task),
4111 TRUE);
4112 /* task and thread ref returned by fork_create_child */
4113 if (imgp->ip_new_thread == NULL) {
4114 error = ENOMEM;
4115 goto exit_with_error;
4116 }
4117
4118 new_task = get_threadtask(imgp->ip_new_thread);
4119 context.vc_thread = imgp->ip_new_thread;
4120 }
4121
4122 error = exec_activate_image(imgp);
4123 /* thread and task ref returned for vfexec case */
4124
4125 if (imgp->ip_new_thread != NULL) {
4126 /*
4127 * task reference might be returned by exec_activate_image
4128 * for vfexec.
4129 */
4130 new_task = get_threadtask(imgp->ip_new_thread);
4131 #if defined(HAS_APPLE_PAC)
4132 ml_task_set_disable_user_jop(new_task, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
4133 ml_thread_set_disable_user_jop(imgp->ip_new_thread, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
4134 #endif
4135 }
4136
4137 if (!error && !in_vfexec) {
4138 p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread);
4139 /* proc ref returned */
4140 should_release_proc_ref = TRUE;
4141
4142 /*
4143 * Need to transfer pending watch port boosts to the new task while still making
4144 * sure that the old task remains in the importance linkage. Create an importance
4145 * linkage from old task to new task, then switch the task importance base
4146 * of old task and new task. After the switch, the port watch boost will be
4147 * boosting the new task, and the new task will be donating importance to the old task.
4148 */
4149 inherit = ipc_importance_exec_switch_task(old_task, new_task);
4150 }
4151
4152 kauth_cred_unref(&context.vc_ucred);
4153
4154 /* Image not claimed by any activator? */
4155 if (error == -1) {
4156 error = ENOEXEC;
4157 }
4158
4159 if (!error) {
4160 exec_done = TRUE;
4161 assert(imgp->ip_new_thread != NULL);
4162
4163 exec_resettextvp(p, imgp);
4164 error = check_for_signature(p, imgp);
4165 }
4166
4167 /* Flag that exec has occurred; notify only if it has not failed due to an FP key error */
4168 if (exec_done && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
4169 proc_knote(p, NOTE_EXEC);
4170 }
4171
4172 if (imgp->ip_vp != NULLVP) {
4173 vnode_put(imgp->ip_vp);
4174 }
4175 if (imgp->ip_scriptvp != NULLVP) {
4176 vnode_put(imgp->ip_scriptvp);
4177 }
4178 if (imgp->ip_strings) {
4179 execargs_free(imgp);
4180 }
4181 #if CONFIG_MACF
4182 if (imgp->ip_execlabelp) {
4183 mac_cred_label_free(imgp->ip_execlabelp);
4184 }
4185 if (imgp->ip_scriptlabelp) {
4186 mac_vnode_label_free(imgp->ip_scriptlabelp);
4187 }
4188 #endif
4189 if (imgp->ip_cs_error != OS_REASON_NULL) {
4190 os_reason_free(imgp->ip_cs_error);
4191 imgp->ip_cs_error = OS_REASON_NULL;
4192 }
4193
4194 if (!error) {
4195 /*
4196 * We need to initialize the bank context behind the protection of
4197 * the proc_trans lock to prevent a race with exit. We can't do this during
4198 * exec_activate_image because task_bank_init checks entitlements that
4199 * aren't loaded until subsequent calls (including exec_resettextvp).
4200 */
4201 error = proc_transstart(p, 0, 0);
4202 }
4203
4204 if (!error) {
4205 task_bank_init(new_task);
4206 proc_transend(p, 0);
4207
4208 #if __arm64__
4209 proc_legacy_footprint_entitled(p, new_task, __FUNCTION__);
4210 #endif /* __arm64__ */
4211
4212 /* Sever any extant thread affinity */
4213 thread_affinity_exec(current_thread());
4214
4215 /* Inherit task role from old task to new task for exec */
4216 if (!in_vfexec) {
4217 proc_inherit_task_role(new_task, old_task);
4218 }
4219
4220 thread_t main_thread = imgp->ip_new_thread;
4221
4222 task_set_main_thread_qos(new_task, main_thread);
4223
4224 #if CONFIG_ARCADE
4225 /*
4226 * Check to see if we need to trigger an arcade upcall AST now
4227 * that the vnode has been reset on the task.
4228 */
4229 arcade_prepare(new_task, imgp->ip_new_thread);
4230 #endif /* CONFIG_ARCADE */
4231
4232 #if CONFIG_MACF
4233 /*
4234 * Processes with the MAP_JIT entitlement are permitted to have
4235 * a jumbo-size map.
4236 */
4237 if (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0) {
4238 vm_map_set_jumbo(get_task_map(new_task));
4239 vm_map_set_jit_entitled(get_task_map(new_task));
4240 }
4241 #endif /* CONFIG_MACF */
4242
4243 if (vm_darkwake_mode == TRUE) {
4244 /*
4245 * This process is being launched when the system
4246 * is in darkwake. So mark it specially. This will
4247 * cause all its pages to be entered in the background Q.
4248 */
4249 task_set_darkwake_mode(new_task, vm_darkwake_mode);
4250 }
4251
4252 #if CONFIG_DTRACE
4253 dtrace_thread_didexec(imgp->ip_new_thread);
4254
4255 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
4256 (*dtrace_proc_waitfor_hook)(p);
4257 }
4258 #endif
4259
4260 #if CONFIG_AUDIT
4261 if (!error && AUDIT_ENABLED() && p) {
4262 /* Add the CDHash of the new process to the audit record */
4263 uint8_t *cdhash = cs_get_cdhash(p);
4264 if (cdhash) {
4265 AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN);
4266 }
4267 }
4268 #endif
4269
4270 if (in_vfexec) {
4271 vfork_return(p, retval, p->p_pid);
4272 }
4273 } else {
4274 DTRACE_PROC1(exec__failure, int, error);
4275 }
4276
4277 exit_with_error:
4278
4279 /*
4280 * clear bsd_info from old task if it did exec.
4281 */
4282 if (task_did_exec(old_task)) {
4283 set_bsdtask_info(old_task, NULL);
4284 }
4285
4286 /* clear bsd_info from new task and terminate it if exec failed */
4287 if (new_task != NULL && task_is_exec_copy(new_task)) {
4288 set_bsdtask_info(new_task, NULL);
4289 task_terminate_internal(new_task);
4290 }
4291
4292 if (imgp != NULL) {
4293 /* Clear the initial wait on the thread transferring watchports */
4294 if (imgp->ip_new_thread) {
4295 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_INITIAL_WAIT);
4296 }
4297
4298 /* Transfer the watchport boost to new task */
4299 if (!error && !in_vfexec) {
4300 task_transfer_turnstile_watchports(old_task,
4301 new_task, imgp->ip_new_thread);
4302 }
4303 /*
4304 * Do not terminate the current task if proc_exec_switch_task did not
4305 * switch the tasks; terminating the current task without the switch would
4306 * result in losing the SIGKILL status.
4307 */
4308 if (task_did_exec(old_task)) {
4309 /* Terminate the current task, since exec will start in new task */
4310 task_terminate_internal(old_task);
4311 }
4312
4313 /* Release the thread ref returned by fork_create_child */
4314 if (imgp->ip_new_thread) {
4315 /* wake up the new exec thread */
4316 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_FINAL_WAIT);
4317 thread_deallocate(imgp->ip_new_thread);
4318 imgp->ip_new_thread = NULL;
4319 }
4320 }
4321
4322 /* Release the ref returned by fork_create_child */
4323 if (new_task) {
4324 task_deallocate(new_task);
4325 new_task = NULL;
4326 }
4327
4328 if (should_release_proc_ref) {
4329 proc_rele(p);
4330 }
4331
4332 if (bufp != NULL) {
4333 FREE(bufp, M_TEMP);
4334 }
4335
4336 if (inherit != NULL) {
4337 ipc_importance_release(inherit);
4338 }
4339
4340 return error;
4341 }
4342
4343
4344 /*
4345 * copyinptr
4346 *
4347 * Description: Copy a pointer in from user space to a user_addr_t in kernel
4348 * space, based on 32/64 bitness of the user space
4349 *
4350 * Parameters: froma User space address
4351 * toptr Address of kernel space user_addr_t
4352 * ptr_size 4/8, based on 'froma' address space
4353 *
4354 * Returns: 0 Success
4355 * EFAULT Bad 'froma'
4356 *
4357 * Implicit returns:
4358 * *toptr Modified
4359 */
4360 static int
4361 copyinptr(user_addr_t froma, user_addr_t *toptr, int ptr_size)
4362 {
4363 int error;
4364
4365 if (ptr_size == 4) {
4366 /* 64 bit value containing 32 bit address */
4367 unsigned int i = 0;
4368
4369 error = copyin(froma, &i, 4);
4370 *toptr = CAST_USER_ADDR_T(i); /* SAFE */
4371 } else {
4372 error = copyin(froma, toptr, 8);
4373 }
4374 return error;
4375 }
4376
4377
4378 /*
4379 * copyoutptr
4380 *
4381 * Description: Copy a pointer out from a user_addr_t in kernel space to
4382 * user space, based on 32/64 bitness of the user space
4383 *
4384 * Parameters: ua User space address to copy to
4385 * ptr Address of kernel space user_addr_t
4386 * ptr_size 4/8, based on 'ua' address space
4387 *
4388 * Returns: 0 Success
4389 * EFAULT Bad 'ua'
4390 *
4391 */
4392 static int
4393 copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size)
4394 {
4395 int error;
4396
4397 if (ptr_size == 4) {
4398 /* 64 bit value containing 32 bit address */
4399 unsigned int i = CAST_DOWN_EXPLICIT(unsigned int, ua); /* SAFE */
4400
4401 error = copyout(&i, ptr, 4);
4402 } else {
4403 error = copyout(&ua, ptr, 8);
4404 }
4405 return error;
4406 }
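/*
 * A userland sketch (not part of this translation unit) of the width
 * handling in copyinptr()/copyoutptr() above: a 32-bit process stores
 * pointers in 4 bytes, which are zero-extended into a 64-bit user_addr_t
 * on the way in and truncated on the way out. memcpy() stands in for
 * copyin()/copyout(), and the sketch_* names are illustrative only.
 */
#include <stdint.h>
#include <string.h>

typedef uint64_t user_addr_sketch_t;    /* stand-in for user_addr_t */

static int
sketch_copyinptr(const void *froma, user_addr_sketch_t *toptr, int ptr_size)
{
	if (ptr_size == 4) {
		uint32_t i;
		memcpy(&i, froma, 4);
		*toptr = (user_addr_sketch_t)i;    /* zero-extend */
	} else {
		memcpy(toptr, froma, 8);
	}
	return 0;
}

static int
sketch_copyoutptr(user_addr_sketch_t ua, void *to, int ptr_size)
{
	if (ptr_size == 4) {
		uint32_t i = (uint32_t)ua;    /* truncate */
		memcpy(to, &i, 4);
	} else {
		memcpy(to, &ua, 8);
	}
	return 0;
}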
4407
4408
4409 /*
4410 * exec_copyout_strings
4411 *
4412 * Copy out the strings segment to user space. The strings segment is put
4413 * on a preinitialized stack frame.
4414 *
4415 * Parameters: struct image_params * the image parameter block
4416 * user_addr_t * a pointer to the stack offset variable
4417 *
4418 * Returns: 0 Success
4419 * !0 Failure: errno
4420 *
4421 * Implicit returns:
4422 * (*stackp) The stack offset, modified
4423 *
4424 * Note: The strings segment is laid out backward from the top
4425 * of the stack, to consume the minimal amount of
4426 * space possible; the returned stack pointer points to the
4427 * end of the area consumed (stacks grow downward).
4428 *
4429 * argc is an int; arg[i] are pointers; env[i] are pointers;
4430 * the 0's are (void *)NULL's
4431 *
4432 * The stack frame layout is:
4433 *
4434 * +-------------+ <- p->user_stack
4435 * | 16b |
4436 * +-------------+
4437 * | STRING AREA |
4438 * | : |
4439 * | : |
4440 * | : |
4441 * +- -- -- -- --+
4442 * | PATH AREA |
4443 * +-------------+
4444 * | 0 |
4445 * +-------------+
4446 * | applev[n] |
4447 * +-------------+
4448 * :
4449 * :
4450 * +-------------+
4451 * | applev[1] |
4452 * +-------------+
4453 * | exec_path / |
4454 * | applev[0] |
4455 * +-------------+
4456 * | 0 |
4457 * +-------------+
4458 * | env[n] |
4459 * +-------------+
4460 * :
4461 * :
4462 * +-------------+
4463 * | env[0] |
4464 * +-------------+
4465 * | 0 |
4466 * +-------------+
4467 * | arg[argc-1] |
4468 * +-------------+
4469 * :
4470 * :
4471 * +-------------+
4472 * | arg[0] |
4473 * +-------------+
4474 * | argc |
4475 * sp-> +-------------+
4476 *
4477 * Although technically a part of the STRING AREA, we treat the PATH AREA as
4478 * a separate entity. This allows us to align the beginning of the PATH AREA
4479 * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers
4480 * which precede it on the stack are properly aligned.
4481 */
4482
4483 static int
4484 exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp)
4485 {
4486 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
4487 int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
4488 int ptr_area_size;
4489 void *ptr_buffer_start, *ptr_buffer;
4490 int string_size;
4491
4492 user_addr_t string_area; /* *argv[], *env[] */
4493 user_addr_t ptr_area; /* argv[], env[], applev[] */
4494 user_addr_t argc_area; /* argc */
4495 user_addr_t stack;
4496 int error;
4497
4498 unsigned i;
4499 struct copyout_desc {
4500 char *start_string;
4501 int count;
4502 #if CONFIG_DTRACE
4503 user_addr_t *dtrace_cookie;
4504 #endif
4505 boolean_t null_term;
4506 } descriptors[] = {
4507 {
4508 .start_string = imgp->ip_startargv,
4509 .count = imgp->ip_argc,
4510 #if CONFIG_DTRACE
4511 .dtrace_cookie = &p->p_dtrace_argv,
4512 #endif
4513 .null_term = TRUE
4514 },
4515 {
4516 .start_string = imgp->ip_endargv,
4517 .count = imgp->ip_envc,
4518 #if CONFIG_DTRACE
4519 .dtrace_cookie = &p->p_dtrace_envp,
4520 #endif
4521 .null_term = TRUE
4522 },
4523 {
4524 .start_string = imgp->ip_strings,
4525 .count = 1,
4526 #if CONFIG_DTRACE
4527 .dtrace_cookie = NULL,
4528 #endif
4529 .null_term = FALSE
4530 },
4531 {
4532 .start_string = imgp->ip_endenvv,
4533 .count = imgp->ip_applec - 1, /* exec_path handled above */
4534 #if CONFIG_DTRACE
4535 .dtrace_cookie = NULL,
4536 #endif
4537 .null_term = TRUE
4538 }
4539 };
4540
4541 stack = *stackp;
4542
4543 /*
4544 * All previous contributors to the string area
4545 * should have aligned their sub-area
4546 */
4547 if (imgp->ip_strspace % ptr_size != 0) {
4548 error = EINVAL;
4549 goto bad;
4550 }
4551
4552 /* Grow the stack down for the strings we've been building up */
4553 string_size = imgp->ip_strendp - imgp->ip_strings;
4554 stack -= string_size;
4555 string_area = stack;
4556
4557 /*
4558 * Need room for one pointer for each string, plus
4559 * one each for the NULLs terminating the argv, envv, and apple areas.
4560 */
4561 ptr_area_size = (imgp->ip_argc + imgp->ip_envc + imgp->ip_applec + 3) * ptr_size;
4562 stack -= ptr_area_size;
4563 ptr_area = stack;
4564
4565 /* We'll construct all the pointer arrays in our string buffer,
4566 * which we already know is aligned properly, and ip_argspace
4567 * was used to verify we have enough space.
4568 */
4569 ptr_buffer_start = ptr_buffer = (void *)imgp->ip_strendp;
4570
4571 /*
4572 * Need room for pointer-aligned argc slot.
4573 */
4574 stack -= ptr_size;
4575 argc_area = stack;
4576
4577 /*
4578 * Record the size of the arguments area so that sysctl_procargs()
4579 * can return the argument area without having to parse the arguments.
4580 */
4581 proc_lock(p);
4582 p->p_argc = imgp->ip_argc;
4583 p->p_argslen = (int)(*stackp - string_area);
4584 proc_unlock(p);
4585
4586 /* Return the initial stack address: the location of argc */
4587 *stackp = stack;
4588
4589 /*
4590 * Copy out the entire strings area.
4591 */
4592 error = copyout(imgp->ip_strings, string_area,
4593 string_size);
4594 if (error) {
4595 goto bad;
4596 }
4597
4598 for (i = 0; i < sizeof(descriptors) / sizeof(descriptors[0]); i++) {
4599 char *cur_string = descriptors[i].start_string;
4600 int j;
4601
4602 #if CONFIG_DTRACE
4603 if (descriptors[i].dtrace_cookie) {
4604 proc_lock(p);
4605 *descriptors[i].dtrace_cookie = ptr_area + ((uintptr_t)ptr_buffer - (uintptr_t)ptr_buffer_start); /* dtrace convenience */
4606 proc_unlock(p);
4607 }
4608 #endif /* CONFIG_DTRACE */
4609
4610 /*
4611 * For each segment (argv, envv, applev), copy as many pointers as requested
4612 * to our pointer buffer.
4613 */
4614 for (j = 0; j < descriptors[i].count; j++) {
4615 user_addr_t cur_address = string_area + (cur_string - imgp->ip_strings);
4616
4617 /* Copy out the pointer to the current string. Alignment has been verified */
4618 if (ptr_size == 8) {
4619 *(uint64_t *)ptr_buffer = (uint64_t)cur_address;
4620 } else {
4621 *(uint32_t *)ptr_buffer = (uint32_t)cur_address;
4622 }
4623
4624 ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
4625 cur_string += strlen(cur_string) + 1; /* Only a NUL between strings in the same area */
4626 }
4627
4628 if (descriptors[i].null_term) {
4629 if (ptr_size == 8) {
4630 *(uint64_t *)ptr_buffer = 0ULL;
4631 } else {
4632 *(uint32_t *)ptr_buffer = 0;
4633 }
4634
4635 ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
4636 }
4637 }
4638
4639 /*
4640 * Copy out all our pointer arrays in bulk.
4641 */
4642 error = copyout(ptr_buffer_start, ptr_area,
4643 ptr_area_size);
4644 if (error) {
4645 goto bad;
4646 }
4647
4648 /* argc (int32, stored in a ptr_size area) */
4649 error = copyoutptr((user_addr_t)imgp->ip_argc, argc_area, ptr_size);
4650 if (error) {
4651 goto bad;
4652 }
4653
4654 bad:
4655 return error;
4656 }
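/*
 * A userland sketch (not part of this translation unit) that walks the
 * vectors exec_copyout_strings() lays out above. On Darwin, main() may
 * take a fourth argument: the apple[] vector, which sits just past the
 * envv[] NULL terminator, with applev[0] being the executable path.
 */
#include <stdio.h>

int
main(int argc, char **argv, char **envp, char **apple)
{
	printf("argc = %d\n", argc);
	for (int i = 0; i < argc; i++) {
		printf("argv[%d] = %s\n", i, argv[i]);
	}
	for (char **e = envp; *e != NULL; e++) {
		printf("env: %s\n", *e);
	}
	for (char **a = apple; *a != NULL; a++) {
		printf("apple: %s\n", *a);
	}
	return 0;
}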
4657
4658
4659 /*
4660 * exec_extract_strings
4661 *
4662 * Copy arguments and environment from user space into work area; we may
4663 * have already copied some early arguments into the work area, and if
4664 * so, any arguments copied in are appended to those already there.
4665 * This function is the primary manipulator of ip_argspace, since
4666 * these are the arguments the client of execve(2) knows about. After
4667 * each argv[]/envv[] string is copied, we charge the string length
4668 * and argv[]/envv[] pointer slot to ip_argspace, so that we can
4669 * fully preflight the arg list size.
4670 *
4671 * Parameters: struct image_params * the image parameter block
4672 *
4673 * Returns: 0 Success
4674 * !0 Failure: errno
4675 *
4676 * Implicit returns:
4677 * (imgp->ip_argc) Count of arguments, updated
4678 * (imgp->ip_envc) Count of environment strings, updated
4679 * (imgp->ip_argspace) Count of NCARGS space remaining
4680 * (imgp->ip_interp_buffer) Interpreter and args (mutated in place)
4681 *
4682 *
4683 * Note: The argument and environment vectors are user space pointers
4684 * to arrays of user space pointers.
4685 */
4686 static int
4687 exec_extract_strings(struct image_params *imgp)
4688 {
4689 int error = 0;
4690 int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT_ADDR) ? 8 : 4;
4691 int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
4692 user_addr_t argv = imgp->ip_user_argv;
4693 user_addr_t envv = imgp->ip_user_envv;
4694
4695 /*
4696 * Adjust space reserved for the path name by however much padding it
4697 * needs. Doing this here since we didn't know if this would be a 32-
4698 * or 64-bit process back in exec_save_path.
4699 */
4700 while (imgp->ip_strspace % new_ptr_size != 0) {
4701 *imgp->ip_strendp++ = '\0';
4702 imgp->ip_strspace--;
4703 /* imgp->ip_argspace--; not counted towards exec args total */
4704 }
4705
4706 /*
4707 * From now on, we start attributing string space to ip_argspace
4708 */
4709 imgp->ip_startargv = imgp->ip_strendp;
4710 imgp->ip_argc = 0;
4711
4712 if ((imgp->ip_flags & IMGPF_INTERPRET) != 0) {
4713 user_addr_t arg;
4714 char *argstart, *ch;
4715
4716 /* First, the arguments in the "#!" string are tokenized and extracted. */
4717 argstart = imgp->ip_interp_buffer;
4718 while (argstart) {
4719 ch = argstart;
4720 while (*ch && !IS_WHITESPACE(*ch)) {
4721 ch++;
4722 }
4723
4724 if (*ch == '\0') {
4725 /* last argument, no need to NUL-terminate */
4726 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);
4727 argstart = NULL;
4728 } else {
4729 /* NUL-terminate */
4730 *ch = '\0';
4731 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);
4732
4733 /*
4734 * Find the next string. We know spaces at the end of the string have already
4735 * been stripped.
4736 */
4737 argstart = ch + 1;
4738 while (IS_WHITESPACE(*argstart)) {
4739 argstart++;
4740 }
4741 }
4742
4743 /* Error-check, regardless of whether this is the last interpreter arg or not */
4744 if (error) {
4745 goto bad;
4746 }
4747 if (imgp->ip_argspace < new_ptr_size) {
4748 error = E2BIG;
4749 goto bad;
4750 }
4751 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
4752 imgp->ip_argc++;
4753 }
4754
4755 if (argv != 0LL) {
4756 /*
4757 * If we are running an interpreter, replace the av[0] that was
4758 * passed to execve() with the path name that was
4759 * passed to execve() for interpreters which do not use the PATH
4760 * to locate their script arguments.
4761 */
4762 error = copyinptr(argv, &arg, ptr_size);
4763 if (error) {
4764 goto bad;
4765 }
4766 if (arg != 0LL) {
4767 argv += ptr_size; /* consume without using */
4768 }
4769 }
4770
4771 if (imgp->ip_interp_sugid_fd != -1) {
4772 char temp[19]; /* "/dev/fd/" + 10 digits + NUL */
4773 snprintf(temp, sizeof(temp), "/dev/fd/%d", imgp->ip_interp_sugid_fd);
4774 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(temp), UIO_SYSSPACE, TRUE);
4775 } else {
4776 error = exec_add_user_string(imgp, imgp->ip_user_fname, imgp->ip_seg, TRUE);
4777 }
4778
4779 if (error) {
4780 goto bad;
4781 }
4782 if (imgp->ip_argspace < new_ptr_size) {
4783 error = E2BIG;
4784 goto bad;
4785 }
4786 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
4787 imgp->ip_argc++;
4788 }
4789
4790 while (argv != 0LL) {
4791 user_addr_t arg;
4792
4793 error = copyinptr(argv, &arg, ptr_size);
4794 if (error) {
4795 goto bad;
4796 }
4797
4798 if (arg == 0LL) {
4799 break;
4800 }
4801
4802 argv += ptr_size;
4803
4804 /*
4805 * av[n...] = arg[n]
4806 */
4807 error = exec_add_user_string(imgp, arg, imgp->ip_seg, TRUE);
4808 if (error) {
4809 goto bad;
4810 }
4811 if (imgp->ip_argspace < new_ptr_size) {
4812 error = E2BIG;
4813 goto bad;
4814 }
4815 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
4816 imgp->ip_argc++;
4817 }
4818
4819 /* Save space for argv[] NULL terminator */
4820 if (imgp->ip_argspace < new_ptr_size) {
4821 error = E2BIG;
4822 goto bad;
4823 }
4824 imgp->ip_argspace -= new_ptr_size;
4825
4826 /* Note where the args ends and env begins. */
4827 imgp->ip_endargv = imgp->ip_strendp;
4828 imgp->ip_envc = 0;
4829
4830 /* Now, get the environment */
4831 while (envv != 0LL) {
4832 user_addr_t env;
4833
4834 error = copyinptr(envv, &env, ptr_size);
4835 if (error) {
4836 goto bad;
4837 }
4838
4839 envv += ptr_size;
4840 if (env == 0LL) {
4841 break;
4842 }
4843 /*
4844 * av[n...] = env[n]
4845 */
4846 error = exec_add_user_string(imgp, env, imgp->ip_seg, TRUE);
4847 if (error) {
4848 goto bad;
4849 }
4850 if (imgp->ip_argspace < new_ptr_size) {
4851 error = E2BIG;
4852 goto bad;
4853 }
4854 imgp->ip_argspace -= new_ptr_size; /* to hold envv[] entry */
4855 imgp->ip_envc++;
4856 }
4857
4858 /* Save space for envv[] NULL terminator */
4859 if (imgp->ip_argspace < new_ptr_size) {
4860 error = E2BIG;
4861 goto bad;
4862 }
4863 imgp->ip_argspace -= new_ptr_size;
4864
4865 /* Align the tail of the combined argv+envv area */
4866 while (imgp->ip_strspace % new_ptr_size != 0) {
4867 if (imgp->ip_argspace < 1) {
4868 error = E2BIG;
4869 goto bad;
4870 }
4871 *imgp->ip_strendp++ = '\0';
4872 imgp->ip_strspace--;
4873 imgp->ip_argspace--;
4874 }
4875
4876 /* Note where the envv ends and applev begins. */
4877 imgp->ip_endenvv = imgp->ip_strendp;
4878
4879 /*
4880 * From now on, we are no longer charging argument
4881 * space to ip_argspace.
4882 */
4883
4884 bad:
4885 return error;
4886 }
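/*
 * A userland sketch (not part of this translation unit) of the "#!"
 * tokenization done by the IMGPF_INTERPRET branch above: split the
 * interpreter line on whitespace, NUL-terminating each token in place.
 * The name tokenize_interp_line() is illustrative only.
 */
#include <ctype.h>
#include <stdio.h>

static void
tokenize_interp_line(char *line)
{
	char *argstart = line;

	while (argstart != NULL) {
		char *ch = argstart;
		while (*ch != '\0' && !isspace((unsigned char)*ch)) {
			ch++;
		}
		if (*ch == '\0') {
			/* last token; already NUL-terminated */
			printf("arg: %s\n", argstart);
			argstart = NULL;
		} else {
			*ch = '\0';
			printf("arg: %s\n", argstart);
			argstart = ch + 1;
			while (isspace((unsigned char)*argstart)) {
				argstart++;
			}
			if (*argstart == '\0') {
				argstart = NULL;    /* only trailing whitespace left */
			}
		}
	}
}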
4887
4888 /*
4889 * Libc has an 8-element array set up for stack guard values. It only fills
4890 * in one of those entries, and both gcc and llvm seem to use only a single
4891 * 8-byte guard. Until somebody needs more than an 8-byte guard value, don't
4892 * do the work to construct them.
4893 */
4894 #define GUARD_VALUES 1
4895 #define GUARD_KEY "stack_guard="
4896
4897 /*
4898 * System malloc needs some entropy when it is initialized.
4899 */
4900 #define ENTROPY_VALUES 2
4901 #define ENTROPY_KEY "malloc_entropy="
4902
4903 /*
4904 * libplatform needs a random pointer-obfuscation value when it is initialized.
4905 */
4906 #define PTR_MUNGE_VALUES 1
4907 #define PTR_MUNGE_KEY "ptr_munge="
4908
4909 /*
4910 * System malloc engages nanozone for UIAPP.
4911 */
4912 #define NANO_ENGAGE_KEY "MallocNanoZone=1"
4913
4914 #define PFZ_KEY "pfz="
4915 extern user32_addr_t commpage_text32_location;
4916 extern user64_addr_t commpage_text64_location;
4917
4918 #define MAIN_STACK_VALUES 4
4919 #define MAIN_STACK_KEY "main_stack="
4920
4921 #define FSID_KEY "executable_file="
4922 #define DYLD_FSID_KEY "dyld_file="
4923 #define CDHASH_KEY "executable_cdhash="
4924 #define DYLD_FLAGS_KEY "dyld_flags="
4925
4926 #define FSID_MAX_STRING "0x1234567890abcdef,0x1234567890abcdef"
4927
4928 #define HEX_STR_LEN 18 // 64-bit hex value "0x0123456701234567"
4929
4930 static int
4931 exec_add_entropy_key(struct image_params *imgp,
4932 const char *key,
4933 int values,
4934 boolean_t embedNUL)
4935 {
4936 const int limit = 8;
4937 uint64_t entropy[limit];
4938 char str[strlen(key) + (HEX_STR_LEN + 1) * limit + 1];
4939 if (values > limit) {
4940 values = limit;
4941 }
4942
4943 read_random(entropy, sizeof(entropy[0]) * values);
4944
4945 if (embedNUL) {
4946 entropy[0] &= ~(0xffull << 8);
4947 }
4948
4949 int len = snprintf(str, sizeof(str), "%s0x%llx", key, entropy[0]);
4950 int remaining = sizeof(str) - len;
4951 for (int i = 1; i < values && remaining > 0; ++i) {
4952 int start = sizeof(str) - remaining;
4953 len = snprintf(&str[start], remaining, ",0x%llx", entropy[i]);
4954 remaining -= len;
4955 }
4956
4957 return exec_add_user_string(imgp, CAST_USER_ADDR_T(str), UIO_SYSSPACE, FALSE);
4958 }
4959
4960 /*
4961 * Build up the contents of the apple[] string vector
4962 */
4963 #if (DEVELOPMENT || DEBUG)
4964 uint64_t dyld_flags = 0;
4965 #endif
4966
4967 static int
4968 exec_add_apple_strings(struct image_params *imgp,
4969 const load_result_t *load_result)
4970 {
4971 int error;
4972 int img_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
4973
4974 /* exec_save_path stored the first string */
4975 imgp->ip_applec = 1;
4976
4977 /* adding the pfz string */
4978 {
4979 char pfz_string[strlen(PFZ_KEY) + HEX_STR_LEN + 1];
4980
4981 if (img_ptr_size == 8) {
4982 snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%llx", commpage_text64_location);
4983 } else {
4984 snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%x", commpage_text32_location);
4985 }
4986 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(pfz_string), UIO_SYSSPACE, FALSE);
4987 if (error) {
4988 goto bad;
4989 }
4990 imgp->ip_applec++;
4991 }
4992
4993 /* adding the NANO_ENGAGE_KEY key */
4994 if (imgp->ip_px_sa) {
4995 int proc_flags = (((struct _posix_spawnattr *) imgp->ip_px_sa)->psa_flags);
4996
4997 if ((proc_flags & _POSIX_SPAWN_NANO_ALLOCATOR) == _POSIX_SPAWN_NANO_ALLOCATOR) {
4998 const char *nano_string = NANO_ENGAGE_KEY;
4999 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(nano_string), UIO_SYSSPACE, FALSE);
5000 if (error) {
5001 goto bad;
5002 }
5003 imgp->ip_applec++;
5004 }
5005 }
5006
5007 /*
5008 * Supply libc with a collection of random values to use when
5009 * implementing -fstack-protector.
5010 *
5011 * (The first random string always contains an embedded NUL so that
5012 * __stack_chk_guard also protects against C string vulnerabilities)
5013 */
5014 error = exec_add_entropy_key(imgp, GUARD_KEY, GUARD_VALUES, TRUE);
5015 if (error) {
5016 goto bad;
5017 }
5018 imgp->ip_applec++;
5019
5020 /*
5021 * Supply libc with entropy for system malloc.
5022 */
5023 error = exec_add_entropy_key(imgp, ENTROPY_KEY, ENTROPY_VALUES, FALSE);
5024 if (error) {
5025 goto bad;
5026 }
5027 imgp->ip_applec++;
5028
5029 /*
5030 * Supply libpthread & libplatform with a random value to use for pointer
5031 * obfuscation.
5032 */
5033 error = exec_add_entropy_key(imgp, PTR_MUNGE_KEY, PTR_MUNGE_VALUES, FALSE);
5034 if (error) {
5035 goto bad;
5036 }
5037 imgp->ip_applec++;
5038
5039 /*
5040 * Add MAIN_STACK_KEY: Supplies the address and size of the main thread's
5041 * stack if it was allocated by the kernel.
5042 *
5043 * The guard page is not included in this stack size as libpthread
5044 * expects to add it back in after receiving this value.
5045 */
5046 if (load_result->unixproc) {
5047 char stack_string[strlen(MAIN_STACK_KEY) + (HEX_STR_LEN + 1) * MAIN_STACK_VALUES + 1];
5048 snprintf(stack_string, sizeof(stack_string),
5049 MAIN_STACK_KEY "0x%llx,0x%llx,0x%llx,0x%llx",
5050 (uint64_t)load_result->user_stack,
5051 (uint64_t)load_result->user_stack_size,
5052 (uint64_t)load_result->user_stack_alloc,
5053 (uint64_t)load_result->user_stack_alloc_size);
5054 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(stack_string), UIO_SYSSPACE, FALSE);
5055 if (error) {
5056 goto bad;
5057 }
5058 imgp->ip_applec++;
5059 }
5060
5061 if (imgp->ip_vattr) {
5062 uint64_t fsid = vnode_get_va_fsid(imgp->ip_vattr);
5063 uint64_t fsobjid = imgp->ip_vattr->va_fileid;
5064
5065 char fsid_string[strlen(FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
5066 snprintf(fsid_string, sizeof(fsid_string),
5067 FSID_KEY "0x%llx,0x%llx", fsid, fsobjid);
5068 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
5069 if (error) {
5070 goto bad;
5071 }
5072 imgp->ip_applec++;
5073 }
5074
5075 if (imgp->ip_dyld_fsid || imgp->ip_dyld_fsobjid) {
5076 char fsid_string[strlen(DYLD_FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
5077 snprintf(fsid_string, sizeof(fsid_string),
5078 DYLD_FSID_KEY "0x%llx,0x%llx", imgp->ip_dyld_fsid, imgp->ip_dyld_fsobjid);
5079 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
5080 if (error) {
5081 goto bad;
5082 }
5083 imgp->ip_applec++;
5084 }
5085
5086 uint8_t cdhash[SHA1_RESULTLEN];
5087 int cdhash_error = ubc_cs_getcdhash(imgp->ip_vp, imgp->ip_arch_offset, cdhash);
5088 if (cdhash_error == 0) {
5089 char hash_string[strlen(CDHASH_KEY) + 2 * SHA1_RESULTLEN + 1];
5090 strncpy(hash_string, CDHASH_KEY, sizeof(hash_string));
5091 char *p = hash_string + sizeof(CDHASH_KEY) - 1;
5092 for (int i = 0; i < SHA1_RESULTLEN; i++) {
5093 snprintf(p, 3, "%02x", (int) cdhash[i]);
5094 p += 2;
5095 }
5096 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(hash_string), UIO_SYSSPACE, FALSE);
5097 if (error) {
5098 goto bad;
5099 }
5100 imgp->ip_applec++;
5101 }
5102 #if (DEVELOPMENT || DEBUG)
5103 if (dyld_flags) {
5104 char dyld_flags_string[strlen(DYLD_FLAGS_KEY) + HEX_STR_LEN + 1];
5105 snprintf(dyld_flags_string, sizeof(dyld_flags_string), DYLD_FLAGS_KEY "0x%llx", dyld_flags);
5106 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(dyld_flags_string), UIO_SYSSPACE, FALSE);
5107 if (error) {
5108 goto bad;
5109 }
5110 imgp->ip_applec++;
5111 }
5112 #endif
5113
5114 /* Align the tail of the combined applev area */
5115 while (imgp->ip_strspace % img_ptr_size != 0) {
5116 *imgp->ip_strendp++ = '\0';
5117 imgp->ip_strspace--;
5118 }
5119
5120 bad:
5121 return error;
5122 }
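/*
 * A userland sketch (not part of this translation unit) of how a runtime
 * consumer might locate one of the key=value strings published by
 * exec_add_apple_strings() above; libc and libplatform do something
 * similar at startup. The name apple_lookup() is illustrative only.
 * (The stack_guard= value embeds a NUL, so it cannot be printed with
 * %s; malloc_entropy= is used here instead.)
 */
#include <stdio.h>
#include <string.h>

static const char *
apple_lookup(char **apple, const char *key)
{
	size_t keylen = strlen(key);

	for (char **a = apple; *a != NULL; a++) {
		if (strncmp(*a, key, keylen) == 0) {
			return *a + keylen;    /* points at the value */
		}
	}
	return NULL;
}

int
main(int argc, char **argv, char **envp, char **apple)
{
	(void)argc; (void)argv; (void)envp;
	const char *entropy = apple_lookup(apple, "malloc_entropy=");
	printf("malloc_entropy value: %s\n", entropy ? entropy : "(absent)");
	return 0;
}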
5123
5124 #define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur)
5125
5126 /*
5127 * exec_check_permissions
5128 *
5129 * Description: Verify that the file that is being attempted to be executed
5130 * is in fact allowed to be executed based on its POSIX file
5131 * permissions and other access control criteria
5132 *
5133 * Parameters: struct image_params * the image parameter block
5134 *
5135 * Returns: 0 Success
5136 * EACCES Permission denied
5137 * ENOEXEC Executable file format error
5138 * ETXTBSY Text file busy [misuse of error code]
5139 * vnode_getattr:???
5140 * vnode_authorize:???
5141 */
5142 static int
5143 exec_check_permissions(struct image_params *imgp)
5144 {
5145 struct vnode *vp = imgp->ip_vp;
5146 struct vnode_attr *vap = imgp->ip_vattr;
5147 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
5148 int error;
5149 kauth_action_t action;
5150
5151 /* Only allow execution of regular files */
5152 if (!vnode_isreg(vp)) {
5153 return EACCES;
5154 }
5155
5156 /* Get the file attributes that we will be using here and elsewhere */
5157 VATTR_INIT(vap);
5158 VATTR_WANTED(vap, va_uid);
5159 VATTR_WANTED(vap, va_gid);
5160 VATTR_WANTED(vap, va_mode);
5161 VATTR_WANTED(vap, va_fsid);
5162 VATTR_WANTED(vap, va_fsid64);
5163 VATTR_WANTED(vap, va_fileid);
5164 VATTR_WANTED(vap, va_data_size);
5165 if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0) {
5166 return error;
5167 }
5168
5169 /*
5170 * Ensure that at least one execute bit is on - otherwise root
5171 * will always succeed, and we don't want that to happen unless the
5172 * file really is executable.
5173 */
5174 if (!vfs_authopaque(vnode_mount(vp)) && ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) {
5175 return EACCES;
5176 }
5177
5178 /* Disallow zero length files */
5179 if (vap->va_data_size == 0) {
5180 return ENOEXEC;
5181 }
5182
5183 imgp->ip_arch_offset = (user_size_t)0;
5184 imgp->ip_arch_size = vap->va_data_size;
5185
5186 /* Disable setuid-ness for traced programs or if MNT_NOSUID */
5187 if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED)) {
5188 vap->va_mode &= ~(VSUID | VSGID);
5189 }
5190
5191 /*
5192 * Disable _POSIX_SPAWN_ALLOW_DATA_EXEC and _POSIX_SPAWN_DISABLE_ASLR
5193 * flags for setuid/setgid binaries.
5194 */
5195 if (vap->va_mode & (VSUID | VSGID)) {
5196 imgp->ip_flags &= ~(IMGPF_ALLOW_DATA_EXEC | IMGPF_DISABLE_ASLR);
5197 }
5198
5199 #if CONFIG_MACF
5200 error = mac_vnode_check_exec(imgp->ip_vfs_context, vp, imgp);
5201 if (error) {
5202 return error;
5203 }
5204 #endif
5205
5206 /* Check for execute permission */
5207 action = KAUTH_VNODE_EXECUTE;
5208 /* Traced images must also be readable */
5209 if (p->p_lflag & P_LTRACED) {
5210 action |= KAUTH_VNODE_READ_DATA;
5211 }
5212 if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0) {
5213 return error;
5214 }
5215
5216 #if 0
5217 /* Don't let it run if anyone had it open for writing */
5218 vnode_lock(vp);
5219 if (vp->v_writecount) {
5220 panic("going to return ETXTBSY %x", vp);
5221 vnode_unlock(vp);
5222 return ETXTBSY;
5223 }
5224 vnode_unlock(vp);
5225 #endif
5226
5227
5228 /* XXX May want to indicate to underlying FS that vnode is open */
5229
5230 return error;
5231 }
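/*
 * A userland sketch (not part of this translation unit) approximating
 * the checks exec_check_permissions() enforces above: the target must be
 * a regular, non-empty file with at least one execute bit. Note that
 * access(2) checks against the real rather than the effective user ID,
 * so this is only an approximation of the kernel's vnode_authorize()
 * check. The name looks_executable() is illustrative only.
 */
#include <sys/stat.h>
#include <unistd.h>

static int
looks_executable(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0) {
		return 0;
	}
	if (!S_ISREG(st.st_mode)) {	/* only regular files may be exec'ed */
		return 0;
	}
	if (st.st_size == 0) {		/* zero-length files are rejected */
		return 0;
	}
	return access(path, X_OK) == 0;
}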
5232
5233
5234 /*
5235 * exec_handle_sugid
5236 *
5237 * Initially clear the P_SUGID in the process flags; if an SUGID process is
5238 * exec'ing a non-SUGID image, then this is the point of no return.
5239 *
5240 * If the image being activated is SUGID, then replace the credential with a
5241 * copy, disable tracing (unless the tracing process is root), reset the
5242 * mach task port to revoke it, and set the P_SUGID bit.
5243 *
5244 * If the saved user and group ID will be changing, then make sure it happens
5245 * to a new credential, rather than a shared one.
5246 *
5247 * Set the security token (this is probably obsolete, given that the token
5248 * should not technically be separate from the credential itself).
5249 *
5250 * Parameters: struct image_params * the image parameter block
5251 *
5252 * Returns: int 0 on success, errno on failure
5253 *
5254 * Implicit returns:
5255 * <process credential> Potentially modified/replaced
5256 * <task port> Potentially revoked
5257 * <process flags> P_SUGID bit potentially modified
5258 * <security token> Potentially modified
5259 */
5260 static int
5261 exec_handle_sugid(struct image_params *imgp)
5262 {
5263 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
5264 kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
5265 int i;
5266 int leave_sugid_clear = 0;
5267 int mac_reset_ipc = 0;
5268 int error = 0;
5269 task_t task = NULL;
5270 #if CONFIG_MACF
5271 int mac_transition, disjoint_cred = 0;
5272 int label_update_return = 0;
5273
5274 /*
5275 * Determine whether a call to update the MAC label will result in the
5276 * credential changing.
5277 *
5278 * Note: MAC policies which do not actually end up modifying
5279 * the label subsequently are strongly encouraged to
5280 * return 0 for this check, since a non-zero answer will
5281 * slow down the exec fast path for normal binaries.
5282 */
5283 mac_transition = mac_cred_check_label_update_execve(
5284 imgp->ip_vfs_context,
5285 imgp->ip_vp,
5286 imgp->ip_arch_offset,
5287 imgp->ip_scriptvp,
5288 imgp->ip_scriptlabelp,
5289 imgp->ip_execlabelp,
5290 p,
5291 imgp->ip_px_smpx);
5292 #endif
5293
5294 OSBitAndAtomic(~((uint32_t)P_SUGID), &p->p_flag);
5295
5296 /*
5297 * Order of the following is important; group checks must go last,
5298 * as we use the success of the 'ismember' check combined with the
5299 * failure of the explicit match to indicate that we will be setting
5300 * the egid of the process even though the new process did not
5301 * require VSUID/VSGID bits in order for it to set the new group as
5302 * its egid.
5303 *
5304 * Note: Technically, by this we are implying a call to
5305 * setegid() in the new process, rather than implying
5306 * it used its VSGID bit to set the effective group,
5307 * even though there is no code in that process to make
5308 * such a call.
5309 */
5310 if (((imgp->ip_origvattr->va_mode & VSUID) != 0 &&
5311 kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) ||
5312 ((imgp->ip_origvattr->va_mode & VSGID) != 0 &&
5313 ((kauth_cred_ismember_gid(cred, imgp->ip_origvattr->va_gid, &leave_sugid_clear) || !leave_sugid_clear) ||
5314 (kauth_cred_getgid(cred) != imgp->ip_origvattr->va_gid)))) {
5315 #if CONFIG_MACF
5316 /* label for MAC transition and neither VSUID nor VSGID */
5317 handle_mac_transition:
5318 #endif
5319
5320 #if !SECURE_KERNEL
5321 /*
5322 * Replace the credential with a copy of itself if euid or
5323 * egid change.
5324 *
5325 * Note: setuid binaries will automatically opt out of
5326 * group resolver participation as a side effect
5327 * of this operation. This is an intentional
5328 * part of the security model, which requires a
5329 * participating credential be established by
5330 * escalating privilege, setting up all other
5331 * aspects of the credential including whether
5332 * or not to participate in external group
5333 * membership resolution, then dropping their
5334 * effective privilege to that of the desired
5335 * final credential state.
5336 *
5337 * Modifications to p_ucred must be guarded using the
5338 * proc's ucred lock. This prevents others from accessing
5339 * a garbage credential.
5340 */
5341 if (imgp->ip_origvattr->va_mode & VSUID) {
5342 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
5343 return kauth_cred_setresuid(my_cred,
5344 KAUTH_UID_NONE,
5345 imgp->ip_origvattr->va_uid,
5346 imgp->ip_origvattr->va_uid,
5347 KAUTH_UID_NONE);
5348 });
5349 }
5350
5351 if (imgp->ip_origvattr->va_mode & VSGID) {
5352 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
5353 return kauth_cred_setresgid(my_cred,
5354 KAUTH_GID_NONE,
5355 imgp->ip_origvattr->va_gid,
5356 imgp->ip_origvattr->va_gid);
5357 });
5358 }
5359 #endif /* !SECURE_KERNEL */
5360
5361 #if CONFIG_MACF
5362 /*
5363 * If a policy has indicated that it will transition the label,
5364 * before making the call into the MAC policies, get a new
5365 * duplicate credential, so they can modify it without
5366 * modifying any others sharing it.
5367 */
5368 if (mac_transition) {
5369 /*
5370 * This hook may generate upcalls that require
5371 * importance donation from the kernel.
5372 * (23925818)
5373 */
5374 thread_t thread = current_thread();
5375 thread_enable_send_importance(thread, TRUE);
5376 kauth_proc_label_update_execve(p,
5377 imgp->ip_vfs_context,
5378 imgp->ip_vp,
5379 imgp->ip_arch_offset,
5380 imgp->ip_scriptvp,
5381 imgp->ip_scriptlabelp,
5382 imgp->ip_execlabelp,
5383 &imgp->ip_csflags,
5384 imgp->ip_px_smpx,
5385 &disjoint_cred, /* will be non zero if disjoint */
5386 &label_update_return);
5387 thread_enable_send_importance(thread, FALSE);
5388
5389 if (disjoint_cred) {
5390 /*
5391 * If updating the MAC label resulted in a
5392 * disjoint credential, flag that we need to
5393 * set the P_SUGID bit. This protects
5394 * against debuggers being attached by an
5395 * insufficiently privileged process onto the
5396 * result of a transition to a more privileged
5397 * credential.
5398 */
5399 leave_sugid_clear = 0;
5400 }
5401
5402 imgp->ip_mac_return = label_update_return;
5403 }
5404
5405 mac_reset_ipc = mac_proc_check_inherit_ipc_ports(p, p->p_textvp, p->p_textoff, imgp->ip_vp, imgp->ip_arch_offset, imgp->ip_scriptvp);
5406
5407 #endif /* CONFIG_MACF */
5408
5409 /*
5410 * If 'leave_sugid_clear' is non-zero, then we passed the
5411 * VSUID and MACF checks, and successfully determined that
5412 * the previous cred was a member of the VSGID group, but
5413 * that it was not the default at the time of the execve,
5414 * and that the post-labelling credential was not disjoint.
5415 * So we don't set the P_SUGID or reset mach ports and fds
5416 * on the basis of simply running this code.
5417 */
5418 if (mac_reset_ipc || !leave_sugid_clear) {
5419 /*
5420 * Have mach reset the task and thread ports.
5421 * We don't want anyone who had the ports before
5422 * a setuid exec to be able to access/control the
5423 * task/thread after.
5424 */
5425 ipc_task_reset((imgp->ip_new_thread != NULL) ?
5426 get_threadtask(imgp->ip_new_thread) : p->task);
5427 ipc_thread_reset((imgp->ip_new_thread != NULL) ?
5428 imgp->ip_new_thread : current_thread());
5429 }
5430
5431 if (!leave_sugid_clear) {
5432 /*
5433 * Flag the process as setuid.
5434 */
5435 OSBitOrAtomic(P_SUGID, &p->p_flag);
5436
5437 /*
5438 * Radar 2261856; setuid security hole fix
5439 * XXX For setuid processes, attempt to ensure that
5440 * stdin, stdout, and stderr are already allocated.
5441 * We do not want userland to accidentally allocate
5442 * descriptors in this range which has implied meaning
5443 * to libc.
5444 */
5445 for (i = 0; i < 3; i++) {
5446 if (p->p_fd->fd_ofiles[i] != NULL) {
5447 continue;
5448 }
5449
5450 /*
5451 * Do the kernel equivalent of
5452 *
5453 * if i == 0
5454 * (void) open("/dev/null", O_RDONLY);
5455 * else
5456 * (void) open("/dev/null", O_WRONLY);
5457 */
5458
5459 struct fileproc *fp;
5460 int indx;
5461 int flag;
5462 struct nameidata *ndp = NULL;
5463
5464 if (i == 0) {
5465 flag = FREAD;
5466 } else {
5467 flag = FWRITE;
5468 }
5469
5470 if ((error = falloc(p,
5471 &fp, &indx, imgp->ip_vfs_context)) != 0) {
5472 continue;
5473 }
5474
5475 MALLOC(ndp, struct nameidata *, sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO);
5476 if (ndp == NULL) {
5477 fp_free(p, indx, fp);
5478 error = ENOMEM;
5479 break;
5480 }
5481
5482 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
5483 CAST_USER_ADDR_T("/dev/null"),
5484 imgp->ip_vfs_context);
5485
5486 if ((error = vn_open(ndp, flag, 0)) != 0) {
5487 fp_free(p, indx, fp);
5488 FREE(ndp, M_TEMP);
5489 break;
5490 }
5491
5492 struct fileglob *fg = fp->f_fglob;
5493
5494 fg->fg_flag = flag;
5495 fg->fg_ops = &vnops;
5496 fg->fg_data = ndp->ni_vp;
5497
5498 vnode_put(ndp->ni_vp);
5499
5500 proc_fdlock(p);
5501 procfdtbl_releasefd(p, indx, NULL);
5502 fp_drop(p, indx, fp, 1);
5503 proc_fdunlock(p);
5504
5505 FREE(ndp, M_TEMP);
5506 }
5507 }
5508 }
5509 #if CONFIG_MACF
5510 else {
5511 /*
5512 * We are here because we were told that the MAC label will
5513 * be transitioned, and the binary is not VSUID or VSGID; to
5514 * deal with this case, we could either duplicate a lot of
5515 * code, or we can indicate we want to default the P_SUGID
5516 * bit clear and jump back up.
5517 */
5518 if (mac_transition) {
5519 leave_sugid_clear = 1;
5520 goto handle_mac_transition;
5521 }
5522 }
5523
5524 #endif /* CONFIG_MACF */
5525
5526 /*
5527 * Implement the semantic where the effective user and group become
5528 * the saved user and group in exec'ed programs.
5529 *
5530 * Modifications to p_ucred must be guarded using the
5531 * proc's ucred lock. This prevents others from accessing
5532 * a garbage credential.
5533 */
5534 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
5535 return kauth_cred_setsvuidgid(my_cred,
5536 kauth_cred_getuid(my_cred),
5537 kauth_cred_getgid(my_cred));
5538 });
5539
5540 /* Update the process' identity version and set the security token */
5541 p->p_idversion = OSIncrementAtomic(&nextpidversion);
5542
5543 if (imgp->ip_new_thread != NULL) {
5544 task = get_threadtask(imgp->ip_new_thread);
5545 } else {
5546 task = p->task;
5547 }
5548 set_security_token_task_internal(p, task);
5549
5550 return error;
5551 }
5552
5553
5554 /*
5555 * create_unix_stack
5556 *
5557 * Description: Set the user stack address for the process to the provided
5558 * address. If a custom stack was not set as a result of the
5559 * load process (i.e. as specified by the image file for the
5560 * executable), then allocate the stack in the provided map and
5561 * set up appropriate guard pages for enforcing administrative
5562 * limits on stack growth, if they end up being needed.
5563 *
5564 * Parameters: p Process to set stack on
5565 * load_result Information from mach-o load commands
5566 * map Address map in which to allocate the new stack
5567 *
5568 * Returns: KERN_SUCCESS Stack successfully created
5569 * !KERN_SUCCESS Mach failure code
5570 */
5571 static kern_return_t
5572 create_unix_stack(vm_map_t map, load_result_t* load_result,
5573 proc_t p)
5574 {
5575 mach_vm_size_t size, prot_size;
5576 mach_vm_offset_t addr, prot_addr;
5577 kern_return_t kr;
5578
5579 mach_vm_address_t user_stack = load_result->user_stack;
5580
5581 proc_lock(p);
5582 p->user_stack = user_stack;
5583 if (load_result->custom_stack) {
5584 p->p_lflag |= P_LCUSTOM_STACK;
5585 }
5586 proc_unlock(p);
5587
5588 if (load_result->user_stack_alloc_size > 0) {
5589 /*
5590 * Allocate enough space for the maximum stack size we
5591 * will ever authorize and an extra page to act as
5592 * a guard page for stack overflows. For default stacks,
5593 * vm_initial_limit_stack takes care of the extra guard page.
5594 * Otherwise we must allocate it ourselves.
5595 */
5596 if (mach_vm_round_page_overflow(load_result->user_stack_alloc_size, &size)) {
5597 return KERN_INVALID_ARGUMENT;
5598 }
5599 addr = mach_vm_trunc_page(load_result->user_stack - size);
5600 kr = mach_vm_allocate_kernel(map, &addr, size,
5601 VM_FLAGS_FIXED, VM_MEMORY_STACK);
5602 if (kr != KERN_SUCCESS) {
5603 // Can't allocate at default location, try anywhere
5604 addr = 0;
5605 kr = mach_vm_allocate_kernel(map, &addr, size,
5606 VM_FLAGS_ANYWHERE, VM_MEMORY_STACK);
5607 if (kr != KERN_SUCCESS) {
5608 return kr;
5609 }
5610
5611 user_stack = addr + size;
5612 load_result->user_stack = user_stack;
5613
5614 proc_lock(p);
5615 p->user_stack = user_stack;
5616 proc_unlock(p);
5617 }
5618
5619 load_result->user_stack_alloc = addr;
5620
5621 /*
5622 * And prevent access to what's above the current stack
5623 * size limit for this process.
5624 */
5625 if (load_result->user_stack_size == 0) {
5626 proc_list_lock();
5627 load_result->user_stack_size = unix_stack_size(p);
5628 proc_list_unlock();
5629 prot_size = mach_vm_trunc_page(size - load_result->user_stack_size);
5630 } else {
5631 prot_size = PAGE_SIZE;
5632 }
5633
5634 prot_addr = addr;
5635 kr = mach_vm_protect(map,
5636 prot_addr,
5637 prot_size,
5638 FALSE,
5639 VM_PROT_NONE);
5640 if (kr != KERN_SUCCESS) {
5641 (void)mach_vm_deallocate(map, addr, size);
5642 return kr;
5643 }
5644 }
5645
5646 return KERN_SUCCESS;
5647 }
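/*
 * A minimal userspace sketch of the guard-page technique used above,
 * assuming only the public Mach calls mach_vm_allocate(),
 * mach_vm_protect() and mach_vm_deallocate(); this is an illustration
 * of the idea, not code from this file, and alloc_stack_with_guard()
 * is an invented name.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
alloc_stack_with_guard(mach_vm_size_t usable_size, mach_vm_address_t *top)
{
	mach_vm_address_t base = 0;
	mach_vm_size_t total = usable_size + PAGE_SIZE;	/* one guard page */
	kern_return_t kr;

	kr = mach_vm_allocate(mach_task_self(), &base, total, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Make the lowest page inaccessible so a stack overrun faults. */
	kr = mach_vm_protect(mach_task_self(), base, PAGE_SIZE, FALSE, VM_PROT_NONE);
	if (kr != KERN_SUCCESS) {
		(void)mach_vm_deallocate(mach_task_self(), base, total);
		return kr;
	}

	*top = base + total;	/* stacks grow down from the top */
	return KERN_SUCCESS;
}
#endif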
5648
5649 #include <sys/reboot.h>
5650
5651 /*
5652 * load_init_program_at_path
5653 *
5654 * Description: Load the "init" program; in most cases, this will be "launchd"
5655 *
5656 * Parameters: p Process to call execve() to create
5657 * the "init" program
5658 * scratch_addr Page in p, scratch space
5659 * path NULL terminated path
5660 *
5661 * Returns: KERN_SUCCESS Success
5662 * !KERN_SUCCESS See execve/mac_execve for error codes
5663 *
5664 * Notes: The process that is passed in is the first manufactured
5665 * process on the system, and gets here via bsd_ast() firing
5666 * for the first time. This is done to ensure that bsd_init()
5667 * has run to completion.
5668 *
5669 * The address map of the first manufactured process matches the
5670 * word width of the kernel. Once the self-exec completes, the
5671 * initproc might be different.
5672 */
5673 static int
5674 load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path)
5675 {
5676 int retval[2];
5677 int error;
5678 struct execve_args init_exec_args;
5679 user_addr_t argv0 = USER_ADDR_NULL, argv1 = USER_ADDR_NULL;
5680
5681 /*
5682 * Validate inputs and pre-conditions
5683 */
5684 assert(p);
5685 assert(scratch_addr);
5686 assert(path);
5687
5688 /*
5689 * Copy out program name.
5690 */
5691 size_t path_length = strlen(path) + 1;
5692 argv0 = scratch_addr;
5693 error = copyout(path, argv0, path_length);
5694 if (error) {
5695 return error;
5696 }
5697
5698 scratch_addr = USER_ADDR_ALIGN(scratch_addr + path_length, sizeof(user_addr_t));
5699
5700 /*
5701 * Copy out the first (and only) argument, similarly.
5702 * Assumes everything fits in a page as allocated above.
5703 */
5704 if (boothowto & RB_SINGLE) {
5705 const char *init_args = "-s";
5706 size_t init_args_length = strlen(init_args) + 1;
5707
5708 argv1 = scratch_addr;
5709 error = copyout(init_args, argv1, init_args_length);
5710 if (error) {
5711 return error;
5712 }
5713
5714 scratch_addr = USER_ADDR_ALIGN(scratch_addr + init_args_length, sizeof(user_addr_t));
5715 }
5716
5717 if (proc_is64bit(p)) {
5718 user64_addr_t argv64bit[3] = {};
5719
5720 argv64bit[0] = argv0;
5721 argv64bit[1] = argv1;
5722 argv64bit[2] = USER_ADDR_NULL;
5723
5724 error = copyout(argv64bit, scratch_addr, sizeof(argv64bit));
5725 if (error) {
5726 return error;
5727 }
5728 } else {
5729 user32_addr_t argv32bit[3] = {};
5730
5731 argv32bit[0] = (user32_addr_t)argv0;
5732 argv32bit[1] = (user32_addr_t)argv1;
5733 argv32bit[2] = USER_ADDR_NULL;
5734
5735 error = copyout(argv32bit, scratch_addr, sizeof(argv32bit));
5736 if (error) {
5737 return error;
5738 }
5739 }
5740
5741 /*
5742 * Set up argument block for fake call to execve.
5743 */
5744 init_exec_args.fname = argv0;
5745 init_exec_args.argp = scratch_addr;
5746 init_exec_args.envp = USER_ADDR_NULL;
5747
5748 /*
5749 * So that init task is set with uid,gid 0 token
5750 */
5751 set_security_token(p);
5752
5753 return execve(p, &init_exec_args, retval);
5754 }
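/*
 * Sketch of the scratch-page layout assembled above (illustrative;
 * exact offsets depend on string lengths and USER_ADDR_ALIGN):
 *
 *	+-------------------------------+ <- original scratch_addr (argv0)
 *	| "/sbin/launchd\0"             |
 *	| "-s\0"  (RB_SINGLE only)      | <- argv1
 *	| pad to user_addr_t alignment  |
 *	| argv0 | argv1 | NULL          | <- init_exec_args.argp
 *	+-------------------------------+
 *
 * Everything must fit in the single page allocated by the caller.
 */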
5755
5756 static const char * init_programs[] = {
5757 #if DEBUG
5758 "/usr/local/sbin/launchd.debug",
5759 #endif
5760 #if DEVELOPMENT || DEBUG
5761 "/usr/local/sbin/launchd.development",
5762 #endif
5763 "/sbin/launchd",
5764 };
5765
5766 /*
5767 * load_init_program
5768 *
5769 * Description: Load the "init" program; in most cases, this will be "launchd"
5770 *
5771 * Parameters: p Process to call execve() to create
5772 * the "init" program
5773 *
5774 * Returns: (void)
5775 *
5776 * Notes: The process that is passed in is the first manufactured
5777 * process on the system, and gets here via bsd_ast() firing
5778 * for the first time. This is done to ensure that bsd_init()
5779 * has run to completion.
5780 *
5781 * In DEBUG & DEVELOPMENT builds, the launchdsuffix boot-arg
5782 * may be used to select a specific launchd executable. As with
5783 * the kcsuffix boot-arg, setting launchdsuffix to "" or "release"
5784 * will force /sbin/launchd to be selected.
5785 *
5786 * Search order by build:
5787 *
5788 * DEBUG DEVELOPMENT RELEASE PATH
5789 * ----------------------------------------------------------------------------------
5790 * 1 1 NA /usr/local/sbin/launchd.$LAUNCHDSUFFIX
5791 * 2 NA NA /usr/local/sbin/launchd.debug
5792 * 3 2 NA /usr/local/sbin/launchd.development
5793 * 4 3 1 /sbin/launchd
5794 */
5795 void
5796 load_init_program(proc_t p)
5797 {
5798 uint32_t i;
5799 int error;
5800 vm_map_t map = current_map();
5801 mach_vm_offset_t scratch_addr = 0;
5802 mach_vm_size_t map_page_size = vm_map_page_size(map);
5803
5804 (void) mach_vm_allocate_kernel(map, &scratch_addr, map_page_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE);
5805 #if CONFIG_MEMORYSTATUS
5806 (void) memorystatus_init_at_boot_snapshot();
5807 #endif /* CONFIG_MEMORYSTATUS */
5808
5809 #if DEBUG || DEVELOPMENT
5810 /* Check for boot-arg suffix first */
5811 char launchd_suffix[64];
5812 if (PE_parse_boot_argn("launchdsuffix", launchd_suffix, sizeof(launchd_suffix))) {
5813 char launchd_path[128];
5814 boolean_t is_release_suffix = ((launchd_suffix[0] == 0) ||
5815 (strcmp(launchd_suffix, "release") == 0));
5816
5817 if (is_release_suffix) {
5818 printf("load_init_program: attempting to load /sbin/launchd\n");
5819 error = load_init_program_at_path(p, (user_addr_t)scratch_addr, "/sbin/launchd");
5820 if (!error) {
5821 return;
5822 }
5823
5824 panic("Process 1 exec of launchd.release failed, errno %d", error);
5825 } else {
5826 strlcpy(launchd_path, "/usr/local/sbin/launchd.", sizeof(launchd_path));
5827 strlcat(launchd_path, launchd_suffix, sizeof(launchd_path));
5828
5829 printf("load_init_program: attempting to load %s\n", launchd_path);
5830 error = load_init_program_at_path(p, (user_addr_t)scratch_addr, launchd_path);
5831 if (!error) {
5832 return;
5833 } else {
5834 printf("load_init_program: failed loading %s: errno %d\n", launchd_path, error);
5835 }
5836 }
5837 }
5838 #endif
5839
5840 error = ENOENT;
5841 for (i = 0; i < sizeof(init_programs) / sizeof(init_programs[0]); i++) {
5842 printf("load_init_program: attempting to load %s\n", init_programs[i]);
5843 error = load_init_program_at_path(p, (user_addr_t)scratch_addr, init_programs[i]);
5844 if (!error) {
5845 return;
5846 } else {
5847 printf("load_init_program: failed loading %s: errno %d\n", init_programs[i], error);
5848 }
5849 }
5850
5851 panic("Process 1 exec of %s failed, errno %d", ((i == 0) ? "<null>" : init_programs[i - 1]), error);
5852 }
5853
5854 /*
5855 * load_return_to_errno
5856 *
5857 * Description: Convert a load_return_t (Mach error) to an errno (BSD error)
5858 *
5859 * Parameters: lrtn Mach error number
5860 *
5861 * Returns: (int) BSD error number
5862 * 0 Success
5863 * EBADARCH Bad architecture
5864 * EBADMACHO Bad Mach object file
5865 * ESHLIBVERS Bad shared library version
5866 * ENOMEM Out of memory/resource shortage
5867 * EACCES Access denied
5868 * ENOENT Entry not found (usually "file
5869 * does not exist")
5870 * EIO An I/O error occurred
5871 * EBADEXEC The executable is corrupt/unknown
5872 */
5873 static int
5874 load_return_to_errno(load_return_t lrtn)
5875 {
5876 switch (lrtn) {
5877 case LOAD_SUCCESS:
5878 return 0;
5879 case LOAD_BADARCH:
5880 return EBADARCH;
5881 case LOAD_BADMACHO:
5882 case LOAD_BADMACHO_UPX:
5883 return EBADMACHO;
5884 case LOAD_SHLIB:
5885 return ESHLIBVERS;
5886 case LOAD_NOSPACE:
5887 case LOAD_RESOURCE:
5888 return ENOMEM;
5889 case LOAD_PROTECT:
5890 return EACCES;
5891 case LOAD_ENOENT:
5892 return ENOENT;
5893 case LOAD_IOERROR:
5894 return EIO;
5895 case LOAD_DECRYPTFAIL:
5896 return EAUTH;
5897 case LOAD_FAILURE:
5898 default:
5899 return EBADEXEC;
5900 }
5901 }
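/*
 * A hypothetical call-site shape for the translation above: the
 * Mach-O loader reports a load_return_t, and the BSD side of execve
 * wants an errno. (The variable names and label below are invented.)
 *
 *	load_return_t lret = load_machfile(imgp, ...);
 *	if (lret != LOAD_SUCCESS) {
 *		error = load_return_to_errno(lret);
 *		goto badpath;
 *	}
 */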
5902
5903 #include <mach/mach_types.h>
5904 #include <mach/vm_prot.h>
5905 #include <mach/semaphore.h>
5906 #include <mach/sync_policy.h>
5907 #include <kern/clock.h>
5908 #include <mach/kern_return.h>
5909
5910 /*
5911 * execargs_alloc
5912 *
5913 * Description: Allocate the block of memory used by the execve arguments.
5914 * At the same time, we allocate a page so that we can read in
5915 * the first page of the image.
5916 *
5917 * Parameters: struct image_params * the image parameter block
5918 *
5919 * Returns: 0 Success
5920 * EINVAL Invalid argument
5921 * EACCES Permission denied
5922 * EINTR Interrupted function
5923 * ENOMEM Not enough space
5924 *
5925 * Notes: This is a temporary allocation into the kernel address space
5926 * to enable us to copy arguments in from user space. This is
5927 * necessitated by not mapping the process calling execve() into
5928 * the kernel address space during the execve() system call.
5929 *
5930 * We assemble the argument and environment, etc., into this
5931 * region before copying it as a single block into the child
5932 * process address space (at the top or bottom of the stack,
5933 * depending on which way the stack grows; see the function
5934 * exec_copyout_strings() for details).
5935 *
5936 * This ends up with a second (possibly unnecessary) copy compared
5937 * with assembling the data directly into the child address space
5938 * instead, but since we cannot be guaranteed that the parent has
5939 * not modified its environment, we cannot assume the data is
5940 * laid out there as a single contiguous block anyway.
5941 */
5942
5943
5944 static int execargs_waiters = 0;
5945 lck_mtx_t *execargs_cache_lock;
5946
5947 static void
5948 execargs_lock_lock(void)
5949 {
5950 lck_mtx_lock_spin(execargs_cache_lock);
5951 }
5952
5953 static void
5954 execargs_lock_unlock(void)
5955 {
5956 lck_mtx_unlock(execargs_cache_lock);
5957 }
5958
5959 static wait_result_t
5960 execargs_lock_sleep(void)
5961 {
5962 return lck_mtx_sleep(execargs_cache_lock, LCK_SLEEP_DEFAULT, &execargs_free_count, THREAD_INTERRUPTIBLE);
5963 }
5964
5965 static kern_return_t
5966 execargs_purgeable_allocate(char **execarg_address)
5967 {
5968 kern_return_t kr = vm_allocate_kernel(bsd_pageable_map, (vm_offset_t *)execarg_address, BSD_PAGEABLE_SIZE_PER_EXEC, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE, VM_KERN_MEMORY_NONE);
5969 assert(kr == KERN_SUCCESS);
5970 return kr;
5971 }
5972
5973 static kern_return_t
5974 execargs_purgeable_reference(void *execarg_address)
5975 {
5976 int state = VM_PURGABLE_NONVOLATILE;
5977 kern_return_t kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);
5978
5979 assert(kr == KERN_SUCCESS);
5980 return kr;
5981 }
5982
5983 static kern_return_t
5984 execargs_purgeable_volatilize(void *execarg_address)
5985 {
5986 int state = VM_PURGABLE_VOLATILE | VM_PURGABLE_ORDERING_OBSOLETE;
5987 kern_return_t kr;
5988 kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);
5989
5990 assert(kr == KERN_SUCCESS);
5991
5992 return kr;
5993 }
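/*
 * A userspace sketch of the purgeable lifecycle behind the three
 * helpers above, assuming the public vm_allocate()/vm_purgable_control()
 * calls are available; purgeable_cycle_demo() is an invented name and
 * this is an illustration, not code from this file.
 */
#if 0
#include <mach/mach.h>

static void
purgeable_cycle_demo(void)
{
	vm_address_t addr = 0;
	int state;

	if (vm_allocate(mach_task_self(), &addr, vm_page_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE) != KERN_SUCCESS) {
		return;
	}

	/* Idle: allow the kernel to reclaim the pages under memory pressure. */
	state = VM_PURGABLE_VOLATILE;
	(void)vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);

	/* Reuse: pin the pages again; old contents may have been purged. */
	state = VM_PURGABLE_NONVOLATILE;
	(void)vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);

	(void)vm_deallocate(mach_task_self(), addr, vm_page_size);
}
#endif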
5994
5995 static void
5996 execargs_wakeup_waiters(void)
5997 {
5998 thread_wakeup(&execargs_free_count);
5999 }
6000
6001 static int
6002 execargs_alloc(struct image_params *imgp)
6003 {
6004 kern_return_t kret;
6005 wait_result_t res;
6006 int i, cache_index = -1;
6007
6008 execargs_lock_lock();
6009
6010 while (execargs_free_count == 0) {
6011 execargs_waiters++;
6012 res = execargs_lock_sleep();
6013 execargs_waiters--;
6014 if (res != THREAD_AWAKENED) {
6015 execargs_lock_unlock();
6016 return EINTR;
6017 }
6018 }
6019
6020 execargs_free_count--;
6021
6022 for (i = 0; i < execargs_cache_size; i++) {
6023 vm_offset_t element = execargs_cache[i];
6024 if (element) {
6025 cache_index = i;
6026 imgp->ip_strings = (char *)(execargs_cache[i]);
6027 execargs_cache[i] = 0;
6028 break;
6029 }
6030 }
6031
6032 assert(execargs_free_count >= 0);
6033
6034 execargs_lock_unlock();
6035
6036 if (cache_index == -1) {
6037 kret = execargs_purgeable_allocate(&imgp->ip_strings);
6038 } else {
6039 kret = execargs_purgeable_reference(imgp->ip_strings);
6040 }
6041
6042 assert(kret == KERN_SUCCESS);
6043 if (kret != KERN_SUCCESS) {
6044 return ENOMEM;
6045 }
6046
6047 /* last page used to read in file headers */
6048 imgp->ip_vdata = imgp->ip_strings + (NCARGS + PAGE_SIZE);
6049 imgp->ip_strendp = imgp->ip_strings;
6050 imgp->ip_argspace = NCARGS;
6051 imgp->ip_strspace = (NCARGS + PAGE_SIZE);
6052
6053 return 0;
6054 }
6055
6056 /*
6057 * execargs_free
6058 *
6059 * Description: Free the block of memory used by the execve arguments and the
6060 * first page of the executable by a previous call to the function
6061 * execargs_alloc().
6062 *
6063 * Parameters: struct image_params * the image parameter block
6064 *
6065 * Returns: 0 Success
6066 * EINVAL Invalid argument
6067 * EINTR Operation interrupted
6068 */
6069 static int
6070 execargs_free(struct image_params *imgp)
6071 {
6072 kern_return_t kret;
6073 int i;
6074 boolean_t needs_wakeup = FALSE;
6075
6076 kret = execargs_purgeable_volatilize(imgp->ip_strings);
6077
6078 execargs_lock_lock();
6079 execargs_free_count++;
6080
6081 for (i = 0; i < execargs_cache_size; i++) {
6082 vm_offset_t element = execargs_cache[i];
6083 if (element == 0) {
6084 execargs_cache[i] = (vm_offset_t) imgp->ip_strings;
6085 imgp->ip_strings = NULL;
6086 break;
6087 }
6088 }
6089
6090 assert(imgp->ip_strings == NULL);
6091
6092 if (execargs_waiters > 0) {
6093 needs_wakeup = TRUE;
6094 }
6095
6096 execargs_lock_unlock();
6097
6098 if (needs_wakeup == TRUE) {
6099 execargs_wakeup_waiters();
6100 }
6101
6102 return kret == KERN_SUCCESS ? 0 : EINVAL;
6103 }
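/*
 * The alloc/free pair above is a counting resource pool built from a
 * mutex plus sleep/wakeup. A userspace sketch of the same pattern
 * with POSIX primitives (all names below are invented):
 */
#if 0
#include <pthread.h>

#define POOL_SIZE 4

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pool_cv = PTHREAD_COND_INITIALIZER;
static int pool_free_count = POOL_SIZE;

static void
pool_acquire(void)
{
	pthread_mutex_lock(&pool_lock);
	while (pool_free_count == 0) {
		/* analogous to execargs_lock_sleep() */
		pthread_cond_wait(&pool_cv, &pool_lock);
	}
	pool_free_count--;
	pthread_mutex_unlock(&pool_lock);
}

static void
pool_release(void)
{
	pthread_mutex_lock(&pool_lock);
	pool_free_count++;
	pthread_mutex_unlock(&pool_lock);
	/* analogous to execargs_wakeup_waiters() */
	pthread_cond_signal(&pool_cv);
}
#endif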
6104
6105 static void
6106 exec_resettextvp(proc_t p, struct image_params *imgp)
6107 {
6108 vnode_t vp;
6109 off_t offset;
6110 vnode_t tvp = p->p_textvp;
6111 int ret;
6112
6113 vp = imgp->ip_vp;
6114 offset = imgp->ip_arch_offset;
6115
6116 if (vp == NULLVP) {
6117 panic("exec_resettextvp: expected valid vp");
6118 }
6119
6120 ret = vnode_ref(vp);
6121 proc_lock(p);
6122 if (ret == 0) {
6123 p->p_textvp = vp;
6124 p->p_textoff = offset;
6125 } else {
6126 p->p_textvp = NULLVP; /* this is paranoia */
6127 p->p_textoff = 0;
6128 }
6129 proc_unlock(p);
6130
6131 if (tvp != NULLVP) {
6132 if (vnode_getwithref(tvp) == 0) {
6133 vnode_rele(tvp);
6134 vnode_put(tvp);
6135 }
6136 }
6137 }
6138
6139 // Includes the 0-byte (therefore "SIZE" instead of "LEN").
6140 static const size_t CS_CDHASH_STRING_SIZE = CS_CDHASH_LEN * 2 + 1;
6141
6142 static void
6143 cdhash_to_string(char str[CS_CDHASH_STRING_SIZE], uint8_t const * const cdhash)
6144 {
6145 static char const nibble[] = "0123456789abcdef";
6146
6147 /* Apparently still the safest way to get a hex representation
6148 * of binary data.
6149 * xnu's printf routines have %*D/%20D in theory, but "not really", see:
6150 * <rdar://problem/33328859> confusion around %*D/%nD in printf
6151 */
6152 for (int i = 0; i < CS_CDHASH_LEN; ++i) {
6153 str[i * 2] = nibble[(cdhash[i] & 0xf0) >> 4];
6154 str[i * 2 + 1] = nibble[cdhash[i] & 0x0f];
6155 }
6156 str[CS_CDHASH_STRING_SIZE - 1] = 0;
6157 }
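/*
 * Usage sketch for cdhash_to_string() with made-up input: a
 * CS_CDHASH_LEN-byte (20-byte SHA-1) CDHash becomes a 40-character
 * lowercase hex string plus the trailing NUL.
 */
#if 0
static void
cdhash_demo(void)
{
	uint8_t cdhash[CS_CDHASH_LEN] = { 0xde, 0xad, 0xbe, 0xef };
	char str[CS_CDHASH_STRING_SIZE];

	cdhash_to_string(str, cdhash);
	printf("cdhash %s\n", str);	/* "deadbeef0000..." */
}
#endif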
6158
6159 /*
6160 * __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__
6161 *
6162 * Description: Waits for the userspace daemon to respond to the request
6163 * we made. The function is declared noinline so that it remains
6164 * visible in stackshots and spindumps, as well as when debugging.
6165 */
6166 __attribute__((noinline)) int
6167 __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid)
6168 {
6169 return find_code_signature(task_access_port, new_pid);
6170 }
6171
6172 static int
6173 check_for_signature(proc_t p, struct image_params *imgp)
6174 {
6175 mach_port_t port = IPC_PORT_NULL;
6176 kern_return_t kr = KERN_FAILURE;
6177 int error = EACCES;
6178 boolean_t unexpected_failure = FALSE;
6179 struct cs_blob *csb;
6180 boolean_t require_success = FALSE;
6181 int spawn = (imgp->ip_flags & IMGPF_SPAWN);
6182 int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
6183 os_reason_t signature_failure_reason = OS_REASON_NULL;
6184
6185 /*
6186 * Override inherited code signing flags with the
6187 * ones for the process that is being successfully
6188 * loaded
6189 */
6190 proc_lock(p);
6191 p->p_csflags = imgp->ip_csflags;
6192 proc_unlock(p);
6193
6194 /* Set the switch_protect flag on the map */
6195 if (p->p_csflags & (CS_HARD | CS_KILL)) {
6196 vm_map_switch_protect(get_task_map(p->task), TRUE);
6197 }
6198
6199 /*
6200 * Image activation may have failed due to policy. This is
6201 * unexpected, but it means the security framework did not
6202 * approve of the exec: kill and return immediately.
6203 */
6204 if (imgp->ip_mac_return != 0) {
6205 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6206 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY, 0, 0);
6207 signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY);
6208 error = imgp->ip_mac_return;
6209 unexpected_failure = TRUE;
6210 goto done;
6211 }
6212
6213 if (imgp->ip_cs_error != OS_REASON_NULL) {
6214 signature_failure_reason = imgp->ip_cs_error;
6215 imgp->ip_cs_error = OS_REASON_NULL;
6216 error = EACCES;
6217 goto done;
6218 }
6219
6220 /* If the code signature came through the image activation path, we skip the
6221 * taskgated / externally attached path. */
6222 if (imgp->ip_csflags & CS_SIGNED) {
6223 error = 0;
6224 goto done;
6225 }
6226
6227 /* The rest of the code is for signatures that either already have been externally
6228 * attached (likely, but not necessarily by a previous run through the taskgated
6229 * path), or that will now be attached by taskgated. */
6230
6231 kr = task_get_task_access_port(p->task, &port);
6232 if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) {
6233 error = 0;
6234 if (require_success) {
6235 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6236 p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT, 0, 0);
6237 signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT);
6238 error = EACCES;
6239 }
6240 goto done;
6241 }
6242
6243 /*
6244 * taskgated returns KERN_SUCCESS if it has completed its work
6245 * and the exec should continue, KERN_FAILURE if the exec should
6246 * fail, or it may error out with a different error code in the
6247 * event of a MIG failure (e.g. the process was signalled during
6248 * the RPC call, taskgated died, the MIG server died, etc.).
6249 */
6250
6251 kr = __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(port, p->p_pid);
6252 switch (kr) {
6253 case KERN_SUCCESS:
6254 error = 0;
6255 break;
6256 case KERN_FAILURE:
6257 error = EACCES;
6258
6259 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6260 p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG, 0, 0);
6261 signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG);
6262 goto done;
6263 default:
6264 error = EACCES;
6265
6266 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6267 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER, 0, 0);
6268 signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER);
6269 unexpected_failure = TRUE;
6270 goto done;
6271 }
6272
6273 /* Only do this if exec_resettextvp() did not fail */
6274 if (p->p_textvp != NULLVP) {
6275 csb = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff);
6276
6277 if (csb != NULL) {
6278 /* As the enforcement we can do here is very limited, we only allow the
6279 * single case that is the reason this code path still exists:
6280 * ad-hoc signed non-platform binaries without special cs_flags and without
6281 * any entitlements (unrestricted ones still pass AMFI). */
6282 if (
6283 /* Revalidate the blob if necessary through bumped generation count. */
6284 (ubc_cs_generation_check(p->p_textvp) == 0 ||
6285 ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0) == 0) &&
6286 /* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */
6287 (csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC &&
6288 /* If it has a CMS blob, it's not adhoc. The CS_ADHOC flag can lie. */
6289 csblob_find_blob_bytes((const uint8_t *)csb->csb_mem_kaddr, csb->csb_mem_size,
6290 CSSLOT_SIGNATURESLOT,
6291 CSMAGIC_BLOBWRAPPER) == NULL &&
6292 /* It could still be in a trust cache (unlikely with CS_ADHOC), or a magic path. */
6293 csb->csb_platform_binary == 0 &&
6294 /* No entitlements, not even unrestricted ones. */
6295 csb->csb_entitlements_blob == NULL) {
6296 proc_lock(p);
6297 p->p_csflags |= CS_SIGNED | CS_VALID;
6298 proc_unlock(p);
6299 } else {
6300 uint8_t cdhash[CS_CDHASH_LEN];
6301 char cdhash_string[CS_CDHASH_STRING_SIZE];
6302 proc_getcdhash(p, cdhash);
6303 cdhash_to_string(cdhash_string, cdhash);
6304 printf("ignoring detached code signature on '%s' with cdhash '%s' "
6305 "because it is invalid, or not a simple adhoc signature.\n",
6306 p->p_name, cdhash_string);
6307 }
6308 }
6309 }
6310
6311 done:
6312 if (0 == error) {
6313 /* The process's code signature related properties are
6314 * fully set up, so this is an opportune moment to log
6315 * platform binary execution, if desired. */
6316 if (platform_exec_logging != 0 && csproc_get_platform_binary(p)) {
6317 uint8_t cdhash[CS_CDHASH_LEN];
6318 char cdhash_string[CS_CDHASH_STRING_SIZE];
6319 proc_getcdhash(p, cdhash);
6320 cdhash_to_string(cdhash_string, cdhash);
6321
6322 os_log(peLog, "CS Platform Exec Logging: Executing platform signed binary "
6323 "'%s' with cdhash %s\n", p->p_name, cdhash_string);
6324 }
6325 } else {
6326 if (!unexpected_failure) {
6327 p->p_csflags |= CS_KILLED;
6328 }
6329 /* make very sure execution fails */
6330 if (vfexec || spawn) {
6331 assert(signature_failure_reason != OS_REASON_NULL);
6332 psignal_vfork_with_reason(p, p->task, imgp->ip_new_thread,
6333 SIGKILL, signature_failure_reason);
6334 signature_failure_reason = OS_REASON_NULL;
6335 error = 0;
6336 } else {
6337 assert(signature_failure_reason != OS_REASON_NULL);
6338 psignal_with_reason(p, SIGKILL, signature_failure_reason);
6339 signature_failure_reason = OS_REASON_NULL;
6340 }
6341 }
6342
6343 if (port != IPC_PORT_NULL) {
6344 ipc_port_release_send(port);
6345 }
6346
6347 /* If we hit this, we likely would have leaked an exit reason */
6348 assert(signature_failure_reason == OS_REASON_NULL);
6349 return error;
6350 }
6351
6352 /*
6353 * Typically as soon as we start executing this process, the
6354 * first instruction will trigger a VM fault to bring the text
6355 * pages (as executable) into the address space, followed soon
6356 * thereafter by dyld data structures (for dynamic executable).
6357 * To optimize this, as well as improve support for hardware
6358 * debuggers that can only access resident pages present
6359 * in the process' page tables, we prefault some pages if
6360 * possible. Errors are non-fatal.
6361 */
6362 static void
6363 exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result)
6364 {
6365 int ret;
6366 size_t expected_all_image_infos_size;
6367
6368 /*
6369 * Prefault executable or dyld entry point.
6370 */
6371 vm_fault(current_map(),
6372 vm_map_trunc_page(load_result->entry_point,
6373 vm_map_page_mask(current_map())),
6374 VM_PROT_READ | VM_PROT_EXECUTE,
6375 FALSE, VM_KERN_MEMORY_NONE,
6376 THREAD_UNINT, NULL, 0);
6377
6378 if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) {
6379 expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos);
6380 } else {
6381 expected_all_image_infos_size = sizeof(struct user32_dyld_all_image_infos);
6382 }
6383
6384 /* Decode dyld anchor structure from <mach-o/dyld_images.h> */
6385 if (load_result->dynlinker &&
6386 load_result->all_image_info_addr &&
6387 load_result->all_image_info_size >= expected_all_image_infos_size) {
6388 union {
6389 struct user64_dyld_all_image_infos infos64;
6390 struct user32_dyld_all_image_infos infos32;
6391 } all_image_infos;
6392
6393 /*
6394 * Pre-fault to avoid copyin() going through the trap handler
6395 * and recovery path.
6396 */
6397 vm_fault(current_map(),
6398 vm_map_trunc_page(load_result->all_image_info_addr,
6399 vm_map_page_mask(current_map())),
6400 VM_PROT_READ | VM_PROT_WRITE,
6401 FALSE, VM_KERN_MEMORY_NONE,
6402 THREAD_UNINT, NULL, 0);
6403 if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) {
6404 /* all_image_infos straddles a page */
6405 vm_fault(current_map(),
6406 vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1,
6407 vm_map_page_mask(current_map())),
6408 VM_PROT_READ | VM_PROT_WRITE,
6409 FALSE, VM_KERN_MEMORY_NONE,
6410 THREAD_UNINT, NULL, 0);
6411 }
6412
6413 ret = copyin(load_result->all_image_info_addr,
6414 &all_image_infos,
6415 expected_all_image_infos_size);
6416 if (ret == 0 && all_image_infos.infos32.version >= DYLD_ALL_IMAGE_INFOS_ADDRESS_MINIMUM_VERSION) {
6417 user_addr_t notification_address;
6418 user_addr_t dyld_image_address;
6419 user_addr_t dyld_version_address;
6420 user_addr_t dyld_all_image_infos_address;
6421 user_addr_t dyld_slide_amount;
6422
6423 if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) {
6424 notification_address = all_image_infos.infos64.notification;
6425 dyld_image_address = all_image_infos.infos64.dyldImageLoadAddress;
6426 dyld_version_address = all_image_infos.infos64.dyldVersion;
6427 dyld_all_image_infos_address = all_image_infos.infos64.dyldAllImageInfosAddress;
6428 } else {
6429 notification_address = all_image_infos.infos32.notification;
6430 dyld_image_address = all_image_infos.infos32.dyldImageLoadAddress;
6431 dyld_version_address = all_image_infos.infos32.dyldVersion;
6432 dyld_all_image_infos_address = all_image_infos.infos32.dyldAllImageInfosAddress;
6433 }
6434
6435 /*
6436 * dyld statically sets up the all_image_infos in its Mach-O
6437 * binary at static link time, with pointers relative to its default
6438 * load address. Since ASLR might slide dyld before its first
6439 * instruction is executed, "dyld_slide_amount" tells us how far
6440 * dyld was loaded compared to its default expected load address.
6441 * All other pointers into dyld's image should be adjusted by this
6442 * amount. At some point later, dyld will fix up pointers to take
6443 * into account the slide, at which point the all_image_infos_address
6444 * field in the structure will match the runtime load address, and
6445 * "dyld_slide_amount" will be 0, if we were to consult it again.
6446 */
6447
6448 dyld_slide_amount = load_result->all_image_info_addr - dyld_all_image_infos_address;
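/*
 * Worked example with hypothetical numbers: if dyld was linked
 * expecting dyld_all_image_infos_address == 0x7fff5fc00000 but ASLR
 * placed the structure at all_image_info_addr == 0x7fff64c00000,
 * then dyld_slide_amount == 0x5000000, and every dyld-relative
 * pointer below is adjusted by that amount before prefaulting.
 */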
6449
6450 #if 0
6451 kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
6452 (uint64_t)load_result->all_image_info_addr,
6453 all_image_infos.infos32.version,
6454 (uint64_t)notification_address,
6455 (uint64_t)dyld_image_address,
6456 (uint64_t)dyld_version_address,
6457 (uint64_t)dyld_all_image_infos_address);
6458 #endif
6459
6460 vm_fault(current_map(),
6461 vm_map_trunc_page(notification_address + dyld_slide_amount,
6462 vm_map_page_mask(current_map())),
6463 VM_PROT_READ | VM_PROT_EXECUTE,
6464 FALSE, VM_KERN_MEMORY_NONE,
6465 THREAD_UNINT, NULL, 0);
6466 vm_fault(current_map(),
6467 vm_map_trunc_page(dyld_image_address + dyld_slide_amount,
6468 vm_map_page_mask(current_map())),
6469 VM_PROT_READ | VM_PROT_EXECUTE,
6470 FALSE, VM_KERN_MEMORY_NONE,
6471 THREAD_UNINT, NULL, 0);
6472 vm_fault(current_map(),
6473 vm_map_trunc_page(dyld_version_address + dyld_slide_amount,
6474 vm_map_page_mask(current_map())),
6475 VM_PROT_READ,
6476 FALSE, VM_KERN_MEMORY_NONE,
6477 THREAD_UNINT, NULL, 0);
6478 vm_fault(current_map(),
6479 vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount,
6480 vm_map_page_mask(current_map())),
6481 VM_PROT_READ | VM_PROT_WRITE,
6482 FALSE, VM_KERN_MEMORY_NONE,
6483 THREAD_UNINT, NULL, 0);
6484 }
6485 }
6486 }
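/*
 * A userspace analogue of the prefaulting above (illustration only;
 * prefault_range() is an invented name): there is no vm_fault()
 * outside the kernel, but an madvise(MADV_WILLNEED) hint followed by
 * touching one byte per page has a similar warming effect.
 */
#if 0
#include <sys/mman.h>
#include <stdint.h>
#include <stddef.h>

static void
prefault_range(void *start, size_t len, size_t page_size)
{
	/* Hint the pager first; errors are non-fatal, as above. */
	(void)madvise(start, len, MADV_WILLNEED);

	/* Touch one byte per page so the mappings are actually populated. */
	volatile const uint8_t *p = (const uint8_t *)start;
	for (size_t off = 0; off < len; off += page_size) {
		(void)p[off];
	}
}
#endif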