/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)kern_exec.c	8.1 (Berkeley) 6/10/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <machine/reg.h>
#include <machine/cpu_capabilities.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/socketvar.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/uio_internal.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/kdebug.h>
#include <sys/signal.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/persona.h>
#include <sys/reason.h>
#if SYSV_SHM
#include <sys/shm_internal.h>	/* shmexec() */
#endif
#include <sys/ubc_internal.h>	/* ubc_map() */
#include <sys/spawn.h>
#include <sys/spawn_internal.h>
#include <sys/process_policy.h>
#include <sys/codesign.h>
#include <sys/random.h>
#include <crypto/sha1.h>

#include <libkern/libkern.h>
#include <libkern/crypto/sha2.h>
#include <security/audit/audit.h>

#include <ipc/ipc_types.h>

#include <mach/mach_param.h>
#include <mach/mach_types.h>
#include <mach/port.h>
#include <mach/task.h>
#include <mach/task_access.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>

#include <kern/sched_prim.h>	/* thread_wakeup() */
#include <kern/affinity.h>
#include <kern/assert.h>
#include <kern/task.h>
#include <kern/coalition.h>
#include <kern/policy_internal.h>
#include <kern/kalloc.h>

#include <os/log.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_AUDIT
#include <bsm/audit_kevents.h>
#endif

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_pageout.h>

#include <kdp/kdp_dyld.h>

#include <machine/machine_routines.h>
#include <machine/pal_routines.h>

#include <pexpert/pexpert.h>

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#include <IOKit/IOBSD.h>

extern boolean_t vm_darkwake_mode;

extern int bootarg_execfailurereports; /* bsd_init.c */
boolean_t unentitled_ios_sim_launch = FALSE;

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
static TUNABLE(bool, bootarg_arm64e_preview_abi, "-arm64e_preview_abi", false);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */

#if CONFIG_DTRACE
/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
extern void dtrace_proc_exec(proc_t);
extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);

/*
 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
 * we will store its value before actually calling it.
 */
static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;

#include <sys/dtrace_ptss.h>
#endif

#if __has_feature(ptrauth_calls)
static int vm_shared_region_per_team_id = 1;
static int vm_shared_region_by_entitlement = 1;

/* Flag to control whether shared cache randomized resliding is enabled */
#if DEVELOPMENT || DEBUG || XNU_TARGET_OS_IOS
static int vm_shared_region_reslide_aslr = 1;
#else /* DEVELOPMENT || DEBUG || XNU_TARGET_OS_IOS */
static int vm_shared_region_reslide_aslr = 0;
#endif /* DEVELOPMENT || DEBUG || XNU_TARGET_OS_IOS */
/*
 * Flag to control which processes should get shared cache randomized resliding
 * after a fault in the shared cache region:
 *
 * 0 - all processes get a new randomized slide
 * 1 - only platform processes get a new randomized slide
 */
int vm_shared_region_reslide_restrict = 1;

#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_per_team_id, CTLFLAG_RW, &vm_shared_region_per_team_id, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_by_entitlement, CTLFLAG_RW, &vm_shared_region_by_entitlement, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_reslide_restrict, CTLFLAG_RW, &vm_shared_region_reslide_restrict, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_reslide_aslr, CTLFLAG_RW, &vm_shared_region_reslide_aslr, 0, "");
#endif

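/*
 * Illustrative note (not from the original source): SYSCTL_INT with the
 * _vm parent registers each knob under the "vm" namespace, so on a
 * DEVELOPMENT/DEBUG kernel these tunables should be reachable from
 * userland roughly like so:
 *
 *	sysctl vm.vm_shared_region_reslide_restrict        # read
 *	sysctl -w vm.vm_shared_region_reslide_aslr=1       # write (CTLFLAG_RW)
 */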
#endif /* __has_feature(ptrauth_calls) */

/* support for child creation in exec after vfork */
thread_t fork_create_child(task_t parent_task,
    coalition_t *parent_coalition,
    proc_t child_proc,
    int inherit_memory,
    int is_64bit_addr,
    int is_64bit_data,
    int in_exec);
void vfork_exit(proc_t p, int rv);
extern void proc_apply_task_networkbg_internal(proc_t, thread_t);
extern void task_set_did_exec_flag(task_t task);
extern void task_clear_exec_copy_flag(task_t task);
proc_t proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread, void **inherit);
boolean_t task_is_active(task_t);
boolean_t thread_is_active(thread_t thread);
void thread_copy_resource_info(thread_t dst_thread, thread_t src_thread);
void *ipc_importance_exec_switch_task(task_t old_task, task_t new_task);
extern void ipc_importance_release(void *elem);
extern boolean_t task_has_watchports(task_t task);
extern void task_set_no_smt(task_t task);
#if defined(HAS_APPLE_PAC)
char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid);
#endif
task_t convert_port_to_task(ipc_port_t port);

/*
 * Mach things for which prototypes are unavailable from Mach headers
 */
#define IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND 0x1
void ipc_task_reset(
	task_t task);
void ipc_thread_reset(
	thread_t thread);
kern_return_t ipc_object_copyin(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name,
	ipc_object_t *objectp,
	mach_port_context_t context,
	mach_msg_guard_flags_t *guard_flags,
	uint32_t kmsg_flags);
void ipc_port_release_send(ipc_port_t);

#if DEVELOPMENT || DEBUG
void task_importance_update_owner_info(task_t);
#endif

extern struct savearea *get_user_regs(thread_t);

__attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid);

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/ast.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <machine/vmparam.h>
#include <sys/imgact.h>

#include <sys/sdt.h>


/*
 * EAI_ITERLIMIT	The maximum number of times to iterate an image
 *			activator in exec_activate_image() before treating
 *			it as malformed/corrupt.
 */
#define EAI_ITERLIMIT	3

/*
 * For #! interpreter parsing
 */
#define IS_WHITESPACE(ch) ((ch == ' ') || (ch == '\t'))
#define IS_EOL(ch) ((ch == '#') || (ch == '\n'))

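/*
 * Illustrative note (not from the original source): IS_EOL deliberately
 * treats '#' as a line terminator so that a trailing comment ends the
 * interpreter line.  For example, in
 *
 *	#! /bin/sh -x  # run with tracing
 *
 * scanning stops at the second '#', leaving "/bin/sh -x" as the
 * interpreter+args string consumed by exec_shell_imgact() below.
 */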
extern vm_map_t bsd_pageable_map;
extern const struct fileops vnops;
extern int nextpidversion;


#define USER_ADDR_ALIGN(addr, val) \
	( ( (user_addr_t)(addr) + (val) - 1) \
	& ~((val) - 1) )

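/*
 * Illustrative note (not from the original source): USER_ADDR_ALIGN rounds
 * a user address up to the next multiple of a power-of-two boundary, e.g.
 *
 *	USER_ADDR_ALIGN(0x1001, 8) == 0x1008
 *	USER_ADDR_ALIGN(0x1008, 8) == 0x1008	(already aligned)
 *
 * It is used when laying out string and pointer areas on the new user
 * stack (see exec_copyout_strings(), declared below).
 */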
/*
 * For subsystem root support
 */
#define SPAWN_SUBSYSTEM_ROOT_ENTITLEMENT "com.apple.private.spawn-subsystem-root"

/* Platform Code Exec Logging */
static int platform_exec_logging = 0;

SYSCTL_DECL(_security_mac);

SYSCTL_INT(_security_mac, OID_AUTO, platform_exec_logging, CTLFLAG_RW, &platform_exec_logging, 0,
    "log cdhashes for all platform binary executions");

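/*
 * Illustrative note (not from the original source): because the knob is
 * registered under the _security_mac parent, enabling platform-binary exec
 * logging from userland should look roughly like:
 *
 *	sysctl -w security.mac.platform_exec_logging=1
 */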
static os_log_t peLog = OS_LOG_DEFAULT;

struct exec_port_actions {
	uint32_t portwatch_count;
	uint32_t registered_count;
	ipc_port_t *portwatch_array;
	ipc_port_t *registered_array;
};

struct image_params;	/* Forward */
static int exec_activate_image(struct image_params *imgp);
static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp);
static int load_return_to_errno(load_return_t lrtn);
static int execargs_alloc(struct image_params *imgp);
static int execargs_free(struct image_params *imgp);
static int exec_check_permissions(struct image_params *imgp);
static int exec_extract_strings(struct image_params *imgp);
static int exec_add_apple_strings(struct image_params *imgp, const load_result_t *load_result);
static int exec_handle_sugid(struct image_params *imgp);
static int sugid_scripts = 0;
SYSCTL_INT(_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW | CTLFLAG_LOCKED, &sugid_scripts, 0, "");
static kern_return_t create_unix_stack(vm_map_t map, load_result_t* load_result, proc_t p);
static int copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size);
static void exec_resettextvp(proc_t, struct image_params *);
static int check_for_signature(proc_t, struct image_params *);
static void exec_prefault_data(proc_t, struct image_params *, load_result_t *);
static errno_t exec_handle_port_actions(struct image_params *imgp,
    struct exec_port_actions *port_actions);
static errno_t exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp,
    task_role_t psa_darwin_role, struct exec_port_actions *port_actions);
static void exec_port_actions_destroy(struct exec_port_actions *port_actions);

/*
 * exec_add_user_string
 *
 * Add the requested string to the string space area.
 *
 * Parameters:	struct image_params *	image parameter block
 *		user_addr_t		string to add to strings area
 *		int			segment from which string comes
 *		boolean_t		TRUE if string contributes to NCARGS
 *
 * Returns:	0			Success
 *		!0			Failure errno from copyinstr()
 *
 * Implicit returns:
 *		(imgp->ip_strendp)	updated location of next add, if any
 *		(imgp->ip_strspace)	updated byte count of space remaining
 *		(imgp->ip_argspace)	updated byte count of space in NCARGS
 */
__attribute__((noinline))
static int
exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolean_t is_ncargs)
{
	int error = 0;

	do {
		size_t len = 0;
		int space;

		if (is_ncargs) {
			space = imgp->ip_argspace; /* by definition smaller than ip_strspace */
		} else {
			space = imgp->ip_strspace;
		}

		if (space <= 0) {
			error = E2BIG;
			break;
		}

		if (!UIO_SEG_IS_USER_SPACE(seg)) {
			char *kstr = CAST_DOWN(char *, str); /* SAFE */
			error = copystr(kstr, imgp->ip_strendp, space, &len);
		} else {
			error = copyinstr(str, imgp->ip_strendp, space, &len);
		}

		imgp->ip_strendp += len;
		imgp->ip_strspace -= len;
		if (is_ncargs) {
			imgp->ip_argspace -= len;
		}
	} while (error == ENAMETOOLONG);

	return error;
}

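/*
 * Illustrative note (not from the original source): the do/while above
 * retries on ENAMETOOLONG because copyinstr()/copystr() copy as much as
 * fits in "space" before failing.  Each partial pass advances ip_strendp
 * and shrinks the space accounting, so a string that never fits drives
 * the remaining space to zero and the loop exits with E2BIG rather than
 * spinning forever.
 */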
/*
 * dyld is now passed the executable path as a getenv-like variable
 * in the same fashion as the stack_guard and malloc_entropy keys.
 */
#define EXECUTABLE_KEY "executable_path="

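/*
 * Illustrative example (not from the original source): with this key, the
 * apple[] vector handed to dyld carries an entry of the form
 *
 *	executable_path=/usr/bin/true
 *
 * which dyld can look up by prefix, just like the stack_guard= and
 * malloc_entropy= entries mentioned above.
 */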
/*
 * exec_save_path
 *
 * To support new app package launching for Mac OS X, dyld needs the
 * first argument to execve() stored on the user stack.
 *
 * Save the executable path name at the bottom of the strings area and set
 * the argument vector pointer to the location following that to indicate
 * the start of the argument and environment tuples, setting the remaining
 * string space count to the size of the string area minus the path length.
 *
 * Parameters:	struct image_params *	image parameter block
 *		char *			path used to invoke program
 *		int			segment from which path comes
 *
 * Returns:	int			0	Success
 *		EFAULT				Bad address
 *	copy[in]str:EFAULT			Bad address
 *	copy[in]str:ENAMETOOLONG		Filename too long
 *
 * Implicit returns:
 *		(imgp->ip_strings)		saved path
 *		(imgp->ip_strspace)		space remaining in ip_strings
 *		(imgp->ip_strendp)		start of remaining copy area
 *		(imgp->ip_argspace)		space remaining of NCARGS
 *		(imgp->ip_applec)		Initial applev[0]
 *
 * Note:	We have to do this before the initial namei() since, if the
 *		path contains symbolic links, namei() will overwrite the
 *		original path buffer contents.  If the last symbolic link
 *		resolved was a relative pathname, we would lose the original
 *		"path", which could be an absolute pathname.  This might be
 *		unacceptable for dyld.
 */
static int
exec_save_path(struct image_params *imgp, user_addr_t path, int seg, const char **excpath)
{
	int error;
	size_t len;
	char *kpath;

	// imgp->ip_strings can come out of a cache, so we need to obliterate the
	// old path.
	memset(imgp->ip_strings, '\0', strlen(EXECUTABLE_KEY) + MAXPATHLEN);

	len = MIN(MAXPATHLEN, imgp->ip_strspace);

	switch (seg) {
	case UIO_USERSPACE32:
	case UIO_USERSPACE64:	/* Same for copyin()... */
		error = copyinstr(path, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
		break;
	case UIO_SYSSPACE:
		kpath = CAST_DOWN(char *, path);	/* SAFE */
		error = copystr(kpath, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
		break;
	default:
		error = EFAULT;
		break;
	}

	if (!error) {
		bcopy(EXECUTABLE_KEY, imgp->ip_strings, strlen(EXECUTABLE_KEY));
		len += strlen(EXECUTABLE_KEY);

		imgp->ip_strendp += len;
		imgp->ip_strspace -= len;

		if (excpath) {
			*excpath = imgp->ip_strings + strlen(EXECUTABLE_KEY);
		}
	}

	return error;
}

/*
 * exec_reset_save_path
 *
 * If we detect a shell script, we need to reset the string area
 * state so that the interpreter can be saved onto the stack.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	int			0	Success
 *
 * Implicit returns:
 *		(imgp->ip_strings)		saved path
 *		(imgp->ip_strspace)		space remaining in ip_strings
 *		(imgp->ip_strendp)		start of remaining copy area
 *		(imgp->ip_argspace)		space remaining of NCARGS
 *
 */
static int
exec_reset_save_path(struct image_params *imgp)
{
	imgp->ip_strendp = imgp->ip_strings;
	imgp->ip_argspace = NCARGS;
	imgp->ip_strspace = (NCARGS + PAGE_SIZE);

	return 0;
}

/*
 * exec_shell_imgact
 *
 * Image activator for interpreter scripts.  If the image begins with
 * the characters "#!", then it is an interpreter script.  Verify the
 * length of the script line indicating the interpreter is not in
 * excess of the maximum allowed size.  If this is the case, then
 * break out the arguments, if any, which are separated by white
 * space, and copy them into the argument save area as if they were
 * provided on the command line before all other arguments.  The line
 * ends when we encounter a comment character ('#') or newline.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	-1			not an interpreter (keep looking)
 *		-3			Success: interpreter: relookup
 *		>0			Failure: interpreter: error number
 *
 * A return value other than -1 indicates subsequent image activators should
 * not be given the opportunity to attempt to activate the image.
 */
static int
exec_shell_imgact(struct image_params *imgp)
{
	char *vdata = imgp->ip_vdata;
	char *ihp;
	char *line_startp, *line_endp;
	char *interp;

	/*
	 * Make sure it's a shell script.  If we've already redirected
	 * from an interpreted file once, don't do it again.
	 */
	if (vdata[0] != '#' ||
	    vdata[1] != '!' ||
	    (imgp->ip_flags & IMGPF_INTERPRET) != 0) {
		return -1;
	}

	if (imgp->ip_origcputype != 0) {
		/* Fat header previously matched, don't allow shell script inside */
		return -1;
	}

	imgp->ip_flags |= IMGPF_INTERPRET;
	imgp->ip_interp_sugid_fd = -1;
	imgp->ip_interp_buffer[0] = '\0';

	/* Check to see if SUGID scripts are permitted.  If they aren't then
	 * clear the SUGID bits.
	 * imgp->ip_vattr is known to be valid.
	 */
	if (sugid_scripts == 0) {
		imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID);
	}

	/* Try to find the first non-whitespace character */
	for (ihp = &vdata[2]; ihp < &vdata[IMG_SHSIZE]; ihp++) {
		if (IS_EOL(*ihp)) {
			/* Did not find interpreter, "#!\n" */
			return ENOEXEC;
		} else if (IS_WHITESPACE(*ihp)) {
			/* Whitespace, like "#!    /bin/sh\n", keep going. */
		} else {
			/* Found start of interpreter */
			break;
		}
	}

	if (ihp == &vdata[IMG_SHSIZE]) {
		/* All whitespace, like "#!           " */
		return ENOEXEC;
	}

	line_startp = ihp;

	/* Try to find the end of the interpreter+args string */
	for (; ihp < &vdata[IMG_SHSIZE]; ihp++) {
		if (IS_EOL(*ihp)) {
			/* Got it */
			break;
		} else {
			/* Still part of interpreter or args */
		}
	}

	if (ihp == &vdata[IMG_SHSIZE]) {
		/* A long line, like "#! blah blah blah" without end */
		return ENOEXEC;
	}

	/* Backtrack until we find the last non-whitespace */
	while (IS_EOL(*ihp) || IS_WHITESPACE(*ihp)) {
		ihp--;
	}

	/* The character after the last non-whitespace is our logical end of line */
	line_endp = ihp + 1;

	/*
	 * Now we have pointers to the usable part of:
	 *
	 * "#!  /usr/bin/int first    second   third    \n"
	 *      ^ line_startp                       ^ line_endp
	 */

	/* copy the interpreter name */
	interp = imgp->ip_interp_buffer;
	for (ihp = line_startp; (ihp < line_endp) && !IS_WHITESPACE(*ihp); ihp++) {
		*interp++ = *ihp;
	}
	*interp = '\0';

	exec_reset_save_path(imgp);
	exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_buffer),
	    UIO_SYSSPACE, NULL);

	/* Copy the entire interpreter + args for later processing into argv[] */
	interp = imgp->ip_interp_buffer;
	for (ihp = line_startp; (ihp < line_endp); ihp++) {
		*interp++ = *ihp;
	}
	*interp = '\0';

#if CONFIG_SETUID
	/*
	 * If we have an SUID or SGID script, create a file descriptor
	 * from the vnode and pass /dev/fd/%d instead of the actual
	 * path name so that the script does not get opened twice
	 */
	if (imgp->ip_origvattr->va_mode & (VSUID | VSGID)) {
		proc_t p;
		struct fileproc *fp;
		int fd;
		int error;

		p = vfs_context_proc(imgp->ip_vfs_context);
		error = falloc(p, &fp, &fd, imgp->ip_vfs_context);
		if (error) {
			return error;
		}

		fp->fp_glob->fg_flag = FREAD;
		fp->fp_glob->fg_ops = &vnops;
		fp->fp_glob->fg_data = (caddr_t)imgp->ip_vp;

		proc_fdlock(p);
		procfdtbl_releasefd(p, fd, NULL);
		fp_drop(p, fd, fp, 1);
		proc_fdunlock(p);
		vnode_ref(imgp->ip_vp);

		imgp->ip_interp_sugid_fd = fd;
	}
#endif /* CONFIG_SETUID */

	return -3;
}


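/*
 * Illustrative example (not from the original source): for a script
 * /usr/local/bin/frob.sh whose first line is
 *
 *	#! /bin/sh -x
 *
 * the activator saves "/bin/sh" as the new path to look up, stashes
 * "/bin/sh -x" in ip_interp_buffer for argument processing, and returns
 * -3 so exec_activate_image() relooks up /bin/sh; the interpreter and its
 * argument are placed before all other arguments, so the new image
 * effectively runs as
 *
 *	{ "/bin/sh", "-x", "/usr/local/bin/frob.sh", ... }
 *
 * (or with a /dev/fd/%d path instead of the script path in the SUID/SGID
 * case handled above).
 */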
/*
 * exec_fat_imgact
 *
 * Image activator for fat 1.0 binaries.  If the binary is fat, then we
 * need to select an image from it internally, and make that the image
 * we are going to attempt to execute.  At present, this consists of
 * reloading the first page for the image with a first page from the
 * offset location indicated by the fat header.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	-1			not a fat binary (keep looking)
 *		-2			Success: encapsulated binary: reread
 *		>0			Failure: error number
 *
 * Important:	This image activator is byte order neutral.
 *
 * Note:	A return value other than -1 indicates subsequent image
 *		activators should not be given the opportunity to attempt
 *		to activate the image.
 *
 *		If we find an encapsulated binary, we make no assertions
 *		about its validity; instead, we leave that up to a rescan
 *		for an activator to claim it, and, if it is claimed by one,
 *		that activator is responsible for determining validity.
 */
static int
exec_fat_imgact(struct image_params *imgp)
{
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	kauth_cred_t cred = kauth_cred_proc_ref(p);
	struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata;
	struct _posix_spawnattr *psa = NULL;
	struct fat_arch fat_arch;
	int resid, error;
	load_return_t lret;

	if (imgp->ip_origcputype != 0) {
		/* Fat header previously matched, don't allow another fat file inside */
		error = -1; /* not claimed */
		goto bad;
	}

	/* Make sure it's a fat binary */
	if (OSSwapBigToHostInt32(fat_header->magic) != FAT_MAGIC) {
		error = -1; /* not claimed */
		goto bad;
	}

	/* imgp->ip_vdata has PAGE_SIZE, zerofilled if the file is smaller */
	lret = fatfile_validate_fatarches((vm_offset_t)fat_header, PAGE_SIZE);
	if (lret != LOAD_SUCCESS) {
		error = load_return_to_errno(lret);
		goto bad;
	}

	/* If posix_spawn binprefs exist, respect those prefs. */
	psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
	if (psa != NULL && psa->psa_binprefs[0] != 0) {
		uint32_t pr = 0;

		/* Check each preference listed against all arches in header */
		for (pr = 0; pr < NBINPREFS; pr++) {
			cpu_type_t pref = psa->psa_binprefs[pr];
			cpu_type_t subpref = psa->psa_subcpuprefs[pr];

			if (pref == 0) {
				/* No suitable arch in the pref list */
				error = EBADARCH;
				goto bad;
			}

			if (pref == CPU_TYPE_ANY) {
				/* Fall through to regular grading */
				goto regular_grading;
			}

			lret = fatfile_getbestarch_for_cputype(pref,
			    subpref,
			    (vm_offset_t)fat_header,
			    PAGE_SIZE,
			    imgp,
			    &fat_arch);
			if (lret == LOAD_SUCCESS) {
				goto use_arch;
			}
		}

		/* Requested binary preference was not honored */
		error = EBADEXEC;
		goto bad;
	}

regular_grading:
	/* Look up our preferred architecture in the fat file. */
	lret = fatfile_getbestarch((vm_offset_t)fat_header,
	    PAGE_SIZE,
	    imgp,
	    &fat_arch,
	    (p->p_flag & P_AFFINITY) != 0);
	if (lret != LOAD_SUCCESS) {
		error = load_return_to_errno(lret);
		goto bad;
	}

use_arch:
	/* Read the Mach-O header out of fat_arch */
	error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata,
	    PAGE_SIZE, fat_arch.offset,
	    UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
	    cred, &resid, p);
	if (error) {
		goto bad;
	}

	if (resid) {
		memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
	}

	/* Success.  Indicate we have identified an encapsulated binary */
	error = -2;
	imgp->ip_arch_offset = (user_size_t)fat_arch.offset;
	imgp->ip_arch_size = (user_size_t)fat_arch.size;
	imgp->ip_origcputype = fat_arch.cputype;
	imgp->ip_origcpusubtype = fat_arch.cpusubtype;

bad:
	kauth_cred_unref(&cred);
	return error;
}

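/*
 * Illustrative sketch (not from the original source); see <mach-o/fat.h>
 * for the real definitions.  The fat 1.0 container parsed above is a
 * big-endian header followed by one fat_arch descriptor per slice:
 *
 *	struct fat_header {
 *		uint32_t	magic;		-- FAT_MAGIC (big-endian on disk)
 *		uint32_t	nfat_arch;	-- number of slices that follow
 *	};
 *	struct fat_arch {
 *		cpu_type_t	cputype;
 *		cpu_subtype_t	cpusubtype;
 *		uint32_t	offset;		-- file offset of this slice
 *		uint32_t	size;		-- size of this slice in bytes
 *		uint32_t	align;		-- alignment as a power of 2
 *	};
 *
 * exec_fat_imgact() picks a slice, then rereads the first page from
 * fat_arch.offset so that a subsequent activator sees a thin Mach-O image.
 */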
static int
activate_exec_state(task_t task, proc_t p, thread_t thread, load_result_t *result)
{
	int ret;

	task_set_dyld_info(task, MACH_VM_MIN_ADDRESS, 0);
	task_set_64bit(task, result->is_64bit_addr, result->is_64bit_data);
	if (result->is_64bit_addr) {
		OSBitOrAtomic(P_LP64, &p->p_flag);
	} else {
		OSBitAndAtomic(~((uint32_t)P_LP64), &p->p_flag);
	}
	task_set_mach_header_address(task, result->mach_header);

	ret = thread_state_initialize(thread);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	if (result->threadstate) {
		uint32_t *ts = result->threadstate;
		uint32_t total_size = (uint32_t)result->threadstate_sz;

		while (total_size > 0) {
			uint32_t flavor = *ts++;
			uint32_t size = *ts++;

			ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
			if (ret) {
				return ret;
			}
			ts += size;
			total_size -= (size + 2) * sizeof(uint32_t);
		}
	}

	thread_setentrypoint(thread, result->entry_point);

	return KERN_SUCCESS;
}

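/*
 * Illustrative note (not from the original source): result->threadstate is
 * a packed sequence of (flavor, count, data...) tuples expressed in
 * uint32_t words, e.g.
 *
 *	{ flavor0, count0, <count0 words>, flavor1, count1, <count1 words> }
 *
 * which is why the walk in activate_exec_state() above advances ts by
 * "size" words and deducts (size + 2) * sizeof(uint32_t) from the running
 * total for each tuple.
 */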
/*
 * Set p->p_comm and p->p_name to the name passed to exec
 */
static void
set_proc_name(struct image_params *imgp, proc_t p)
{
	int p_name_len = sizeof(p->p_name) - 1;

	if (imgp->ip_ndp->ni_cnd.cn_namelen > p_name_len) {
		imgp->ip_ndp->ni_cnd.cn_namelen = p_name_len;
	}

	bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_name,
	    (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
	p->p_name[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';

	if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN) {
		imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
	}

	bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
	    (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
	p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
}

#if __has_feature(ptrauth_calls)
/**
 * Returns a team ID string that may be used to assign a shared region.
 *
 * Platform binaries do not have team IDs and will return NULL.  Non-platform
 * binaries without a team ID will be assigned an artificial team ID of ""
 * (empty string) so that they will not be assigned to the default shared
 * region.
 *
 * @param imgp image parameter block
 * @return NULL if this is a platform binary, or an appropriate team ID string
 *         otherwise
 */
static inline const char *
get_teamid_for_shared_region(struct image_params *imgp)
{
	assert(imgp->ip_vp != NULL);

	const char *ret = csvnode_get_teamid(imgp->ip_vp, imgp->ip_arch_offset);
	if (ret) {
		return ret;
	}

	struct cs_blob *blob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset);
	if (csblob_get_platform_binary(blob)) {
		return NULL;
	} else {
		static const char *NO_TEAM_ID = "";
		return NO_TEAM_ID;
	}
}

/**
 * Determines whether ptrauth should be enabled for the provided arm64 CPU subtype.
 *
 * @param cpusubtype Mach-O style CPU subtype
 * @return whether the CPU subtype matches arm64e with the current ptrauth ABI
 */
static inline bool
arm64_cpusubtype_uses_ptrauth(cpu_subtype_t cpusubtype)
{
	return (cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E &&
	       CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(cpusubtype) == CPU_SUBTYPE_ARM64_PTR_AUTH_CURRENT_VERSION;
}

#endif /* __has_feature(ptrauth_calls) */

/**
 * Returns whether a type/subtype slice matches the requested
 * type/subtype.
 *
 * @param mask Bits to mask from the requested/tested cpu type
 * @param req_cpu Requested cpu type
 * @param req_subcpu Requested cpu subtype
 * @param test_cpu Tested slice cpu type
 * @param test_subcpu Tested slice cpu subtype
 */
boolean_t
binary_match(cpu_type_t mask, cpu_type_t req_cpu,
    cpu_subtype_t req_subcpu, cpu_type_t test_cpu,
    cpu_subtype_t test_subcpu)
{
	if ((test_cpu & ~mask) != (req_cpu & ~mask)) {
		return FALSE;
	}

	test_subcpu &= ~CPU_SUBTYPE_MASK;
	req_subcpu &= ~CPU_SUBTYPE_MASK;

	if (test_subcpu != req_subcpu && req_subcpu != (CPU_SUBTYPE_ANY & ~CPU_SUBTYPE_MASK)) {
		return FALSE;
	}

	return TRUE;
}

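/*
 * Illustrative example (not from the original source): exec_mach_imgact()
 * below uses binary_match() to test a posix_spawn binary preference
 * against the slice selected by the fat activator, e.g.
 *
 *	binary_match(CPU_ARCH_MASK, pref, subpref,
 *	    imgp->ip_origcputype, imgp->ip_origcpusubtype);
 *
 * Passing CPU_ARCH_MASK as the mask means the architecture bits (such as
 * CPU_ARCH_ABI64) are ignored when comparing cpu types, and a requested
 * subtype of CPU_SUBTYPE_ANY acts as a wildcard.
 */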
/*
 * exec_mach_imgact
 *
 * Image activator for mach-o 1.0 binaries.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	-1			not a Mach-O binary (keep looking)
 *		-2			Success: encapsulated binary: reread
 *		>0			Failure: error number
 *		EBADARCH		Mach-o binary, but with an unrecognized
 *					architecture
 *		ENOMEM			No memory for child process; can only
 *					happen after vfork()
 *
 * Important:	This image activator is NOT byte order neutral.
 *
 * Note:	A return value other than -1 indicates subsequent image
 *		activators should not be given the opportunity to attempt
 *		to activate the image.
 *
 * TODO:	More gracefully handle failures after vfork
 */
static int
exec_mach_imgact(struct image_params *imgp)
{
	struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	int error = 0;
	task_t task;
	task_t new_task = NULL; /* protected by vfexec */
	thread_t thread;
	struct uthread *uthread;
	vm_map_t old_map = VM_MAP_NULL;
	vm_map_t map = VM_MAP_NULL;
	load_return_t lret;
	load_result_t load_result = {};
	struct _posix_spawnattr *psa = NULL;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
	int exec = (imgp->ip_flags & IMGPF_EXEC);
	os_reason_t exec_failure_reason = OS_REASON_NULL;
	boolean_t reslide = FALSE;

	/*
	 * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
	 * is a reserved field on the end, so for the most part, we can
	 * treat them as if they were identical. Reverse-endian Mach-O
	 * binaries are recognized but not compatible.
	 */
	if ((mach_header->magic == MH_CIGAM) ||
	    (mach_header->magic == MH_CIGAM_64)) {
		error = EBADARCH;
		goto bad;
	}

	if ((mach_header->magic != MH_MAGIC) &&
	    (mach_header->magic != MH_MAGIC_64)) {
		error = -1;
		goto bad;
	}

	if (mach_header->filetype != MH_EXECUTE) {
		error = -1;
		goto bad;
	}

	if (imgp->ip_origcputype != 0) {
		/* Fat header previously had an idea about this thin file */
		if (imgp->ip_origcputype != mach_header->cputype ||
		    imgp->ip_origcpusubtype != mach_header->cpusubtype) {
			error = EBADARCH;
			goto bad;
		}
	} else {
		imgp->ip_origcputype = mach_header->cputype;
		imgp->ip_origcpusubtype = mach_header->cpusubtype;
	}

	task = current_task();
	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64) {
		imgp->ip_flags |= IMGPF_IS_64BIT_ADDR | IMGPF_IS_64BIT_DATA;
	}


	/* If posix_spawn binprefs exist, respect those prefs. */
	psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
	if (psa != NULL && psa->psa_binprefs[0] != 0) {
		int pr = 0;
		for (pr = 0; pr < NBINPREFS; pr++) {
			cpu_type_t pref = psa->psa_binprefs[pr];
			cpu_subtype_t subpref = psa->psa_subcpuprefs[pr];

			if (pref == 0) {
				/* No suitable arch in the pref list */
				error = EBADARCH;
				goto bad;
			}

			if (pref == CPU_TYPE_ANY) {
				/* Jump to regular grading */
				goto grade;
			}

			if (binary_match(CPU_ARCH_MASK, pref, subpref,
			    imgp->ip_origcputype, imgp->ip_origcpusubtype)) {
				goto grade;
			}
		}
		error = EBADARCH;
		goto bad;
	}
grade:
	if (!grade_binary(imgp->ip_origcputype, imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK,
	    imgp->ip_origcpusubtype & CPU_SUBTYPE_MASK, TRUE)) {
		error = EBADARCH;
		goto bad;
	}

	if (validate_potential_simulator_binary(imgp->ip_origcputype, imgp,
	    imgp->ip_arch_offset, imgp->ip_arch_size) != LOAD_SUCCESS) {
#if __x86_64__
		const char *excpath;
		error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath);
		os_log_error(OS_LOG_DEFAULT, "Unsupported 32-bit executable: \"%s\"", (error) ? imgp->ip_vp->v_name : excpath);
#endif
		error = EBADARCH;
		goto bad;
	}

#if defined(HAS_APPLE_PAC)
	assert(mach_header->cputype == CPU_TYPE_ARM64);

	if (mach_header->cputype == CPU_TYPE_ARM64 &&
	    arm64_cpusubtype_uses_ptrauth(mach_header->cpusubtype)) {
		imgp->ip_flags &= ~IMGPF_NOJOP;
	} else {
		imgp->ip_flags |= IMGPF_NOJOP;
	}
#endif

	/* Copy in arguments/environment from the old process */
	error = exec_extract_strings(imgp);
	if (error) {
		goto bad;
	}

	AUDIT_ARG(argv, imgp->ip_startargv, imgp->ip_argc,
	    imgp->ip_endargv - imgp->ip_startargv);
	AUDIT_ARG(envv, imgp->ip_endargv, imgp->ip_envc,
	    imgp->ip_endenvv - imgp->ip_endargv);

	/*
	 * We are being called to activate an image subsequent to a vfork()
	 * operation; in this case, we know that our task, thread, and
	 * uthread are actually those of our parent, and our proc, which we
	 * obtained indirectly from the image_params vfs_context_t, is the
	 * new child process.
	 */
	if (vfexec) {
		imgp->ip_new_thread = fork_create_child(task,
		    NULL,
		    p,
		    FALSE,
		    (imgp->ip_flags & IMGPF_IS_64BIT_ADDR),
		    (imgp->ip_flags & IMGPF_IS_64BIT_DATA),
		    FALSE);
		/* task and thread ref returned, will be released in __mac_execve */
		if (imgp->ip_new_thread == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}


	/* reset local idea of thread, uthread, task */
	thread = imgp->ip_new_thread;
	uthread = get_bsdthread_info(thread);
	task = new_task = get_threadtask(thread);

	/*
	 * Load the Mach-O file.
	 *
	 * NOTE: An error after this point indicates we have potentially
	 * destroyed or overwritten some process state while attempting an
	 * execve() following a vfork(), which is an unrecoverable condition.
	 * We send the new process an immediate SIGKILL to avoid it executing
	 * any instructions in the mutated address space. For true spawns,
	 * this is not the case, and "too late" is still not too late to
	 * return an error code to the parent process.
	 */

	/*
	 * Actually load the image file we previously decided to load.
	 */
	lret = load_machfile(imgp, mach_header, thread, &map, &load_result);
	if (lret != LOAD_SUCCESS) {
		error = load_return_to_errno(lret);

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO, 0, 0);
		if (lret == LOAD_BADMACHO_UPX) {
			set_proc_name(imgp, p);
			exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_UPX);
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
		} else {
			exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);

			if (bootarg_execfailurereports) {
				set_proc_name(imgp, p);
				exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
			}
		}

		exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;

		goto badtoolate;
	}

	proc_lock(p);
	{
		p->p_cputype = imgp->ip_origcputype;
		p->p_cpusubtype = imgp->ip_origcpusubtype;
	}
	p->p_platform = load_result.ip_platform;
	p->p_min_sdk = load_result.lr_min_sdk;
	p->p_sdk = load_result.lr_sdk;
	vm_map_set_user_wire_limit(map, (vm_size_t)proc_limitgetcur(p, RLIMIT_MEMLOCK, FALSE));
#if XNU_TARGET_OS_OSX
	if (p->p_platform == PLATFORM_IOS) {
		assert(vm_map_is_alien(map));
	} else {
		assert(!vm_map_is_alien(map));
	}
#endif /* XNU_TARGET_OS_OSX */
	proc_unlock(p);

	/*
	 * Set code-signing flags if this binary is signed, or if parent has
	 * requested them on exec.
	 */
	if (load_result.csflags & CS_VALID) {
		imgp->ip_csflags |= load_result.csflags &
		    (CS_VALID | CS_SIGNED | CS_DEV_CODE | CS_LINKER_SIGNED |
		    CS_HARD | CS_KILL | CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV |
		    CS_FORCED_LV | CS_ENTITLEMENTS_VALIDATED | CS_DYLD_PLATFORM | CS_RUNTIME |
		    CS_ENTITLEMENT_FLAGS |
		    CS_EXEC_SET_HARD | CS_EXEC_SET_KILL | CS_EXEC_SET_ENFORCEMENT);
	} else {
		imgp->ip_csflags &= ~CS_VALID;
	}

	if (p->p_csflags & CS_EXEC_SET_HARD) {
		imgp->ip_csflags |= CS_HARD;
	}
	if (p->p_csflags & CS_EXEC_SET_KILL) {
		imgp->ip_csflags |= CS_KILL;
	}
	if (p->p_csflags & CS_EXEC_SET_ENFORCEMENT) {
		imgp->ip_csflags |= CS_ENFORCEMENT;
	}
	if (p->p_csflags & CS_EXEC_INHERIT_SIP) {
		if (p->p_csflags & CS_INSTALLER) {
			imgp->ip_csflags |= CS_INSTALLER;
		}
		if (p->p_csflags & CS_DATAVAULT_CONTROLLER) {
			imgp->ip_csflags |= CS_DATAVAULT_CONTROLLER;
		}
		if (p->p_csflags & CS_NVRAM_UNRESTRICTED) {
			imgp->ip_csflags |= CS_NVRAM_UNRESTRICTED;
		}
	}

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
	/*
	 * ptrauth version 0 is a preview ABI. Developers can opt into running
	 * their own arm64e binaries for local testing, with the understanding
	 * that future OSes may break ABI.
	 */
	if ((imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E &&
	    CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(imgp->ip_origcpusubtype) == 0 &&
	    !load_result.platform_binary &&
	    !bootarg_arm64e_preview_abi) {
		static bool logged_once = false;
		set_proc_name(imgp, p);

		printf("%s: not running binary \"%s\" built against preview arm64e ABI\n", __func__, p->p_name);
		if (!os_atomic_xchg(&logged_once, true, relaxed)) {
			printf("%s: (to allow this, add \"-arm64e_preview_abi\" to boot-args)\n", __func__);
		}

		exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);
		if (bootarg_execfailurereports) {
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;
		}
		goto badtoolate;
	}

	if ((imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_ARM64E &&
	    imgp->ip_origcputype == CPU_TYPE_ARM64 &&
	    load_result.platform_binary &&
	    (imgp->ip_flags & IMGPF_DRIVER) != 0) {
		set_proc_name(imgp, p);
		printf("%s: disallowing arm64 platform driverkit binary \"%s\", should be arm64e\n", __func__, p->p_name);
		exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);
		if (bootarg_execfailurereports) {
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;
		}
		goto badtoolate;
	}
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */

	/*
	 * Set up the shared cache region in the new process.
	 *
	 * Normally there is a single shared region per architecture.
	 * However on systems with Pointer Authentication, we can create
	 * multiple shared caches with the amount of sharing determined
	 * by team-id or entitlement. Inherited shared region IDs are used
	 * for system processes that need to match and be able to inspect
	 * a pre-existing task.
	 */
	int cpu_subtype = 0; /* all cpu_subtypes use the same shared region */
#if __has_feature(ptrauth_calls)
	char *shared_region_id = NULL;
	size_t len;
	char *base;
	const char *cbase;
#define TEAM_ID_PREFIX "T-"
#define ENTITLE_PREFIX "E-"
#define SR_PREFIX_LEN 2
#define SR_ENTITLEMENT "com.apple.pac.shared_region_id"

	if (cpu_type() == CPU_TYPE_ARM64 &&
	    arm64_cpusubtype_uses_ptrauth(p->p_cpusubtype) &&
	    (imgp->ip_flags & IMGPF_NOJOP) == 0) {
		assertf(p->p_cputype == CPU_TYPE_ARM64,
		    "p %p cpu_type() 0x%x p->p_cputype 0x%x p->p_cpusubtype 0x%x",
		    p, cpu_type(), p->p_cputype, p->p_cpusubtype);

		/*
		 * arm64e uses pointer authentication, so request a separate
		 * shared region for this CPU subtype.
		 */
		cpu_subtype = p->p_cpusubtype & ~CPU_SUBTYPE_MASK;

		/*
		 * Determine which shared cache to select based on being told,
		 * matching a team-id or matching an entitlement.
		 */
		if (imgp->ip_inherited_shared_region_id) {
			len = strlen(imgp->ip_inherited_shared_region_id);
			shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS,
			    len + 1, Z_WAITOK);
			memcpy(shared_region_id, imgp->ip_inherited_shared_region_id, len + 1);
		} else if ((cbase = get_teamid_for_shared_region(imgp)) != NULL) {
			len = strlen(cbase);
			if (vm_shared_region_per_team_id) {
				shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS,
				    len + SR_PREFIX_LEN + 1, Z_WAITOK);
				memcpy(shared_region_id, TEAM_ID_PREFIX, SR_PREFIX_LEN);
				memcpy(shared_region_id + SR_PREFIX_LEN, cbase, len + 1);
			}
		} else if ((base = IOVnodeGetEntitlement(imgp->ip_vp,
		    (int64_t)imgp->ip_arch_offset, SR_ENTITLEMENT)) != NULL) {
			len = strlen(base);
			if (vm_shared_region_by_entitlement) {
				shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS,
				    len + SR_PREFIX_LEN + 1, Z_WAITOK);
				memcpy(shared_region_id, ENTITLE_PREFIX, SR_PREFIX_LEN);
				memcpy(shared_region_id + SR_PREFIX_LEN, base, len + 1);
			}
			/* Discard the copy of the entitlement */
			kheap_free(KHEAP_DATA_BUFFERS, base, len + 1);
		}
	}

	if (imgp->ip_flags & IMGPF_RESLIDE) {
		reslide = TRUE;
	}

	/* use "" as the default shared_region_id */
	if (shared_region_id == NULL) {
		shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, 1, Z_WAITOK);
		*shared_region_id = 0;
	}

	/* ensure there's a unique pointer signing key for this shared_region_id */
	shared_region_key_alloc(shared_region_id,
	    imgp->ip_inherited_shared_region_id != NULL, imgp->ip_inherited_jop_pid);
	task_set_shared_region_id(task, shared_region_id);
	shared_region_id = NULL;
#endif /* __has_feature(ptrauth_calls) */

	int cputype = cpu_type();
	vm_map_exec(map, task, load_result.is_64bit_addr, (void *)p->p_fd->fd_rdir, cputype, cpu_subtype, reslide);

#if XNU_TARGET_OS_OSX
#define SINGLE_JIT_ENTITLEMENT "com.apple.security.cs.single-jit"

	if (IOTaskHasEntitlement(task, SINGLE_JIT_ENTITLEMENT)) {
		vm_map_single_jit(map);
	}
#endif /* XNU_TARGET_OS_OSX */

	/*
	 * Close file descriptors which specify close-on-exec.
	 */
	fdexec(p, psa != NULL ? psa->psa_flags : 0, exec);

	/*
	 * deal with set[ug]id.
	 */
	error = exec_handle_sugid(imgp);
	if (error) {
		vm_map_deallocate(map);

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE, 0, 0);

		exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE);
		if (bootarg_execfailurereports) {
			set_proc_name(imgp, p);
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
		}

		goto badtoolate;
	}

	/*
	 * Commit to new map.
	 *
	 * Swap the new map for the old one on the target task, which
	 * consumes our new map reference but leaves us responsible for
	 * the old_map reference.  That lets us get off the pmap associated
	 * with it, and then we can release it.
	 *
	 * The map needs to be set on the target task which is different
	 * than current task, thus swap_task_map is used instead of
	 * vm_map_switch.
	 */
	old_map = swap_task_map(task, thread, map);
	vm_map_deallocate(old_map);
	old_map = NULL;

	lret = activate_exec_state(task, p, thread, &load_result);
	if (lret != KERN_SUCCESS) {
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE, 0, 0);

		exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE);
		if (bootarg_execfailurereports) {
			set_proc_name(imgp, p);
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
		}

		goto badtoolate;
	}

	/*
	 * deal with voucher on exec-calling thread.
	 */
	if (imgp->ip_new_thread == NULL) {
		thread_set_mach_voucher(current_thread(), IPC_VOUCHER_NULL);
	}

	/* Make sure we won't interrupt ourselves signalling a partial process */
	if (!vfexec && !spawn && (p->p_lflag & P_LTRACED)) {
		psignal(p, SIGTRAP);
	}

	if (load_result.unixproc &&
	    create_unix_stack(get_task_map(task),
	    &load_result,
	    p) != KERN_SUCCESS) {
		error = load_return_to_errno(LOAD_NOSPACE);

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC, 0, 0);

		exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC);
		if (bootarg_execfailurereports) {
			set_proc_name(imgp, p);
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
		}

		goto badtoolate;
	}

	error = exec_add_apple_strings(imgp, &load_result);
	if (error) {
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT, 0, 0);

		exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT);
		if (bootarg_execfailurereports) {
			set_proc_name(imgp, p);
			exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
		}
		goto badtoolate;
	}

	/* Switch to target task's map to copy out strings */
	old_map = vm_map_switch(get_task_map(task));

	if (load_result.unixproc) {
		user_addr_t ap;

		/*
		 * Copy the strings area out into the new process address
		 * space.
		 */
		ap = p->user_stack;
		error = exec_copyout_strings(imgp, &ap);
		if (error) {
			vm_map_switch(old_map);

			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
			    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS, 0, 0);

			exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS);
			if (bootarg_execfailurereports) {
				set_proc_name(imgp, p);
				exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
			}
			goto badtoolate;
		}
		/* Set the stack */
		thread_setuserstack(thread, ap);
	}

	if (load_result.dynlinker || load_result.is_cambria) {
		user_addr_t ap;
		int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;

		/* Adjust the stack */
		ap = thread_adjuserstack(thread, -new_ptr_size);
		error = copyoutptr(load_result.mach_header, ap, new_ptr_size);

		if (error) {
			vm_map_switch(old_map);

			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
			    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER, 0, 0);

			exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER);
			if (bootarg_execfailurereports) {
				set_proc_name(imgp, p);
				exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
			}
			goto badtoolate;
		}
		task_set_dyld_info(task, load_result.all_image_info_addr,
		    load_result.all_image_info_size);
	}


	/* Avoid immediate VM faults back into kernel */
	exec_prefault_data(p, imgp, &load_result);

	vm_map_switch(old_map);

	/*
	 * Reset signal state.
	 */
	execsigs(p, thread);

	/*
	 * need to cancel async IO requests that can be cancelled and wait for those
	 * already active.  MAY BLOCK!
	 */
	_aio_exec( p );

#if SYSV_SHM
	/* FIXME: Till vmspace inherit is fixed: */
	if (!vfexec && p->vm_shm) {
		shmexec(p);
	}
#endif
#if SYSV_SEM
	/* Clean up the semaphores */
	semexit(p);
#endif

	/*
	 * Remember file name for accounting.
	 */
	p->p_acflag &= ~AFORK;

	set_proc_name(imgp, p);

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_apps &&
	    load_result.platform_binary) {
		if (strncmp(p->p_name,
		    "Camera",
		    sizeof(p->p_name)) == 0) {
			task_set_could_use_secluded_mem(task, TRUE);
		} else {
			task_set_could_use_secluded_mem(task, FALSE);
		}
		if (strncmp(p->p_name,
		    "mediaserverd",
		    sizeof(p->p_name)) == 0) {
			task_set_could_also_use_secluded_mem(task, TRUE);
		}
	}
#endif /* CONFIG_SECLUDED_MEMORY */

#if __arm64__
	if (load_result.legacy_footprint) {
		task_set_legacy_footprint(task);
	}
#endif /* __arm64__ */

	pal_dbg_set_task_name(task);

	/*
	 * The load result will have already been munged by AMFI to include the
	 * platform binary flag if boot-args dictated it (AMFI will mark anything
	 * that doesn't go through the upcall path as a platform binary if its
	 * enforcement is disabled).
	 */
	if (load_result.platform_binary) {
		if (cs_debug) {
			printf("setting platform binary on task: pid = %d\n", p->p_pid);
		}

		/*
		 * We must use 'task' here because the proc's task has not yet been
		 * switched to the new one.
		 */
		task_set_platform_binary(task, TRUE);
	} else {
		if (cs_debug) {
			printf("clearing platform binary on task: pid = %d\n", p->p_pid);
		}

		task_set_platform_binary(task, FALSE);
	}

#if DEVELOPMENT || DEBUG
	/*
	 * Update the pid and proc name for importance base if any
	 */
	task_importance_update_owner_info(task);
#endif

	memcpy(&p->p_uuid[0], &load_result.uuid[0], sizeof(p->p_uuid));

#if CONFIG_DTRACE
	dtrace_proc_exec(p);
#endif

	if (kdebug_enable) {
		long args[4] = {};

		uintptr_t fsid = 0, fileid = 0;
		if (imgp->ip_vattr) {
			uint64_t fsid64 = vnode_get_va_fsid(imgp->ip_vattr);
			fsid = (uintptr_t)fsid64;
			fileid = (uintptr_t)imgp->ip_vattr->va_fileid;
			// check for (unexpected) overflow and trace zero in that case
			if (fsid != fsid64 || fileid != imgp->ip_vattr->va_fileid) {
				fsid = fileid = 0;
			}
		}
		KERNEL_DEBUG_CONSTANT_IST1(TRACE_DATA_EXEC, p->p_pid, fsid, fileid, 0,
		    (uintptr_t)thread_tid(thread));

		/*
		 * Collect the pathname for tracing
		 */
		kdbg_trace_string(p, &args[0], &args[1], &args[2], &args[3]);
		KERNEL_DEBUG_CONSTANT_IST1(TRACE_STRING_EXEC, args[0], args[1],
		    args[2], args[3], (uintptr_t)thread_tid(thread));
	}


	/*
	 * If posix_spawned with the START_SUSPENDED flag, stop the
	 * process before it runs.
	 */
	if (imgp->ip_px_sa != NULL) {
		psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
		if (psa->psa_flags & POSIX_SPAWN_START_SUSPENDED) {
			proc_lock(p);
			p->p_stat = SSTOP;
			proc_unlock(p);
			(void) task_suspend_internal(task);
		}
	}

	/*
	 * mark as execed, wake up the process that vforked (if any) and tell
	 * it that it now has its own resources back
	 */
	OSBitOrAtomic(P_EXEC, &p->p_flag);
	proc_resetregister(p);
	if (p->p_pptr && (p->p_lflag & P_LPPWAIT)) {
		proc_lock(p);
		p->p_lflag &= ~P_LPPWAIT;
		proc_unlock(p);
		wakeup((caddr_t)p->p_pptr);
	}

	/*
	 * Pay for our earlier safety; deliver the delayed signals from
	 * the incomplete vfexec process now that it's complete.
	 */
	if (vfexec && (p->p_lflag & P_LTRACED)) {
		psignal_vfork(p, new_task, thread, SIGTRAP);
	}

	goto done;

badtoolate:
	/* Don't allow child process to execute any instructions */
	if (!spawn) {
		if (vfexec) {
			assert(exec_failure_reason != OS_REASON_NULL);
			psignal_vfork_with_reason(p, new_task, thread, SIGKILL, exec_failure_reason);
			exec_failure_reason = OS_REASON_NULL;
		} else {
			assert(exec_failure_reason != OS_REASON_NULL);
			psignal_with_reason(p, SIGKILL, exec_failure_reason);
			exec_failure_reason = OS_REASON_NULL;

			if (exec) {
				/* Terminate the exec copy task */
				task_terminate_internal(task);
			}
		}

		/* We can't stop this system call at this point, so just pretend we succeeded */
		error = 0;
	} else {
		os_reason_free(exec_failure_reason);
		exec_failure_reason = OS_REASON_NULL;
	}

done:
	if (load_result.threadstate) {
		kfree(load_result.threadstate, load_result.threadstate_sz);
		load_result.threadstate = NULL;
	}

bad:
	/* If we hit this, we likely would have leaked an exit reason */
	assert(exec_failure_reason == OS_REASON_NULL);
	return error;
}


1723/*
1724 * Our image activator table; this is the table of the image types we are
1725 * capable of loading. We list them in order of preference to ensure the
1726 * fastest image load speed.
1727 *
1728 * XXX hardcoded, for now; should use linker sets
1729 */
1730struct execsw {
 1731 int (*const ex_imgact)(struct image_params *);
 1732 const char *ex_name;
 1733} const execsw[] = {
1734 { exec_mach_imgact, "Mach-o Binary" },
1735 { exec_fat_imgact, "Fat Binary" },
1736 { exec_shell_imgact, "Interpreter Script" },
1737 { NULL, NULL}
1738};
1739
1740
1741/*
1742 * exec_activate_image
1743 *
1744 * Description: Iterate through the available image activators, and activate
1745 * the image associated with the imgp structure. We start with
 1746 * the activator for Mach-o binaries, followed by the one for Fat
 1747 * binaries, and then the one for Interpreter scripts.
1748 *
1749 * Parameters: struct image_params * Image parameter block
1750 *
1751 * Returns: 0 Success
1752 * EBADEXEC The executable is corrupt/unknown
1753 * execargs_alloc:EINVAL Invalid argument
1754 * execargs_alloc:EACCES Permission denied
1755 * execargs_alloc:EINTR Interrupted function
1756 * execargs_alloc:ENOMEM Not enough space
1757 * exec_save_path:EFAULT Bad address
1758 * exec_save_path:ENAMETOOLONG Filename too long
1759 * exec_check_permissions:EACCES Permission denied
1760 * exec_check_permissions:ENOEXEC Executable file format error
1761 * exec_check_permissions:ETXTBSY Text file busy [misuse of error code]
1762 * exec_check_permissions:???
1763 * namei:???
1764 * vn_rdwr:??? [anything vn_rdwr can return]
1765 * <ex_imgact>:??? [anything an imgact can return]
1766 * EDEADLK Process is being terminated
1767 */
1768static int
1769exec_activate_image(struct image_params *imgp)
1770{
1771 struct nameidata *ndp = NULL;
1772 const char *excpath;
1773 int error;
1774 int resid;
 1775 int once = 1; /* save SUGID-ness for interpreted files */
1776 int i;
1777 int itercount = 0;
1778 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
1779
1780 error = execargs_alloc(imgp);
1781 if (error) {
1782 goto bad_notrans;
1783 }
1784
1785 error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath);
1786 if (error) {
1787 goto bad_notrans;
1788 }
1789
1790 /* Use excpath, which contains the copyin-ed exec path */
1791 DTRACE_PROC1(exec, uintptr_t, excpath);
1792
1793 ndp = kheap_alloc(KHEAP_TEMP, sizeof(*ndp), Z_WAITOK | Z_ZERO);
1794 if (ndp == NULL) {
1795 error = ENOMEM;
1796 goto bad_notrans;
1797 }
1798
1799 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
1800 UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);
1801
1802again:
1803 error = namei(ndp);
1804 if (error) {
1805 goto bad_notrans;
1806 }
1807 imgp->ip_ndp = ndp; /* successful namei(); call nameidone() later */
1808 imgp->ip_vp = ndp->ni_vp; /* if set, need to vnode_put() at some point */
1809
1810 /*
1811 * Before we start the transition from binary A to binary B, make
1812 * sure another thread hasn't started exiting the process. We grab
1813 * the proc lock to check p_lflag initially, and the transition
1814 * mechanism ensures that the value doesn't change after we release
1815 * the lock.
1816 */
1817 proc_lock(p);
1818 if (p->p_lflag & P_LEXIT) {
1819 error = EDEADLK;
1820 proc_unlock(p);
1821 goto bad_notrans;
1822 }
1823 error = proc_transstart(p, 1, 0);
1824 proc_unlock(p);
1825 if (error) {
1826 goto bad_notrans;
1827 }
1828
1829 error = exec_check_permissions(imgp);
1830 if (error) {
1831 goto bad;
1832 }
1833
1834 /* Copy; avoid invocation of an interpreter overwriting the original */
1835 if (once) {
1836 once = 0;
1837 *imgp->ip_origvattr = *imgp->ip_vattr;
1838 }
1839
1840 error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, PAGE_SIZE, 0,
1841 UIO_SYSSPACE, IO_NODELOCKED,
1842 vfs_context_ucred(imgp->ip_vfs_context),
1843 &resid, vfs_context_proc(imgp->ip_vfs_context));
1844 if (error) {
1845 goto bad;
1846 }
1847
1848 if (resid) {
1849 memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
1850 }
1851
1852encapsulated_binary:
1853 /* Limit the number of iterations we will attempt on each binary */
1854 if (++itercount > EAI_ITERLIMIT) {
1855 error = EBADEXEC;
1856 goto bad;
1857 }
1858 error = -1;
1859 for (i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) {
1860 error = (*execsw[i].ex_imgact)(imgp);
1861
1862 switch (error) {
1863 /* case -1: not claimed: continue */
1864 case -2: /* Encapsulated binary, imgp->ip_XXX set for next iteration */
1865 goto encapsulated_binary;
1866
1867 case -3: /* Interpreter */
1868#if CONFIG_MACF
1869 /*
1870 * Copy the script label for later use. Note that
1871 * the label can be different when the script is
1872 * actually read by the interpreter.
1873 */
1874 if (imgp->ip_scriptlabelp) {
1875 mac_vnode_label_free(imgp->ip_scriptlabelp);
1876 }
1877 imgp->ip_scriptlabelp = mac_vnode_label_alloc();
1878 if (imgp->ip_scriptlabelp == NULL) {
1879 error = ENOMEM;
1880 break;
1881 }
1882 mac_vnode_label_copy(imgp->ip_vp->v_label,
1883 imgp->ip_scriptlabelp);
1884
1885 /*
1886 * Take a ref of the script vnode for later use.
1887 */
1888 if (imgp->ip_scriptvp) {
1889 vnode_put(imgp->ip_scriptvp);
1890 imgp->ip_scriptvp = NULLVP;
1891 }
1892 if (vnode_getwithref(imgp->ip_vp) == 0) {
1893 imgp->ip_scriptvp = imgp->ip_vp;
1894 }
1895#endif
1896
1897 nameidone(ndp);
1898
1899 vnode_put(imgp->ip_vp);
1900 imgp->ip_vp = NULL; /* already put */
1901 imgp->ip_ndp = NULL; /* already nameidone */
1902
1903 /* Use excpath, which exec_shell_imgact reset to the interpreter */
1904 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF,
1905 UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);
1906
1907 proc_transend(p, 0);
1908 goto again;
1909
1910 default:
1911 break;
1912 }
1913 }
1914
1915 if (error == 0) {
1916 if (imgp->ip_flags & IMGPF_INTERPRET && ndp->ni_vp) {
1917 AUDIT_ARG(vnpath, ndp->ni_vp, ARG_VNODE2);
1918 }
1919
1920 /*
1921 * Call out to allow 3rd party notification of exec.
1922 * Ignore result of kauth_authorize_fileop call.
1923 */
1924 if (kauth_authorize_fileop_has_listeners()) {
1925 kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context),
1926 KAUTH_FILEOP_EXEC,
1927 (uintptr_t)ndp->ni_vp, 0);
1928 }
1929 }
1930bad:
1931 proc_transend(p, 0);
1932
1933bad_notrans:
1934 if (imgp->ip_strings) {
1935 execargs_free(imgp);
1936 }
1937 if (imgp->ip_ndp) {
1938 nameidone(imgp->ip_ndp);
1939 }
1940 kheap_free(KHEAP_TEMP, ndp, sizeof(*ndp));
1941
1942 return error;
1943}
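
/*
 * The activator protocol used by exec_activate_image() above, distilled:
 * an ex_imgact callback returns -1 when it does not claim the image, -2
 * when it rewrote imgp for another pass (e.g. a fat slice), -3 when it
 * found an interpreter line, and 0 or an errno otherwise. A hypothetical
 * activator (exec_foo_imgact and its helpers are not real xnu functions)
 * would look roughly like this:
 */
#if 0
static int
exec_foo_imgact(struct image_params *imgp)
{
	if (!foo_magic_matches(imgp->ip_vdata)) {
		return -1;      /* not claimed; try the next activator */
	}
	if (foo_is_wrapper(imgp->ip_vdata)) {
		/* point imgp at the embedded image and rescan */
		return -2;
	}
	/* ... map the image; return 0 on success or an errno ... */
	return 0;
}
#endif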
1944
1945/*
1946 * exec_validate_spawnattr_policy
1947 *
1948 * Description: Validates the entitlements required to set the apptype.
1949 *
1950 * Parameters: int psa_apptype posix spawn attribute apptype
1951 *
1952 * Returns: 0 Success
1953 * EPERM Failure
1954 */
1955static errno_t
1956exec_validate_spawnattr_policy(int psa_apptype)
1957{
1958 if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) {
1959 int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK;
1960 if (proctype == POSIX_SPAWN_PROC_TYPE_DRIVER) {
1961 if (!IOTaskHasEntitlement(current_task(), POSIX_SPAWN_ENTITLEMENT_DRIVER)) {
1962 return EPERM;
1963 }
1964 }
1965 }
1966
1967 return 0;
1968}
1969
1970/*
1971 * exec_handle_spawnattr_policy
1972 *
1973 * Description: Decode and apply the posix_spawn apptype, qos clamp, and watchport ports to the task.
1974 *
1975 * Parameters: proc_t p process to apply attributes to
1976 * int psa_apptype posix spawn attribute apptype
1977 *
1978 * Returns: 0 Success
1979 */
1980static errno_t
1981exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp,
1982 task_role_t psa_darwin_role, struct exec_port_actions *port_actions)
1983{
1984 int apptype = TASK_APPTYPE_NONE;
1985 int qos_clamp = THREAD_QOS_UNSPECIFIED;
1986 task_role_t role = TASK_UNSPECIFIED;
1987
1988 if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) {
1989 int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK;
1990
1991 switch (proctype) {
1992 case POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE:
1993 apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
1994 break;
1995 case POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD:
1996 apptype = TASK_APPTYPE_DAEMON_STANDARD;
1997 break;
1998 case POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE:
1999 apptype = TASK_APPTYPE_DAEMON_ADAPTIVE;
2000 break;
2001 case POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND:
2002 apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
2003 break;
2004 case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT:
2005 apptype = TASK_APPTYPE_APP_DEFAULT;
2006 break;
2007 case POSIX_SPAWN_PROC_TYPE_DRIVER:
2008 apptype = TASK_APPTYPE_DRIVER;
2009 break;
2010 default:
2011 apptype = TASK_APPTYPE_NONE;
2012 /* TODO: Should an invalid value here fail the spawn? */
2013 break;
2014 }
2015 }
2016
2017 if (psa_qos_clamp != POSIX_SPAWN_PROC_CLAMP_NONE) {
2018 switch (psa_qos_clamp) {
2019 case POSIX_SPAWN_PROC_CLAMP_UTILITY:
2020 qos_clamp = THREAD_QOS_UTILITY;
2021 break;
2022 case POSIX_SPAWN_PROC_CLAMP_BACKGROUND:
2023 qos_clamp = THREAD_QOS_BACKGROUND;
2024 break;
2025 case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE:
2026 qos_clamp = THREAD_QOS_MAINTENANCE;
2027 break;
2028 default:
2029 qos_clamp = THREAD_QOS_UNSPECIFIED;
2030 /* TODO: Should an invalid value here fail the spawn? */
2031 break;
2032 }
2033 }
2034
2035 if (psa_darwin_role != PRIO_DARWIN_ROLE_DEFAULT) {
2036 proc_darwin_role_to_task_role(psa_darwin_role, &role);
2037 }
2038
2039 if (apptype != TASK_APPTYPE_NONE ||
2040 qos_clamp != THREAD_QOS_UNSPECIFIED ||
2041 role != TASK_UNSPECIFIED ||
2042 port_actions->portwatch_count) {
2043 proc_set_task_spawnpolicy(p->task, thread, apptype, qos_clamp, role,
2044 port_actions->portwatch_array, port_actions->portwatch_count);
2045 }
2046
2047 if (port_actions->registered_count) {
2048 if (mach_ports_register(p->task, port_actions->registered_array,
2049 port_actions->registered_count)) {
2050 return EINVAL;
2051 }
2052 /* mach_ports_register() consumed the array */
2053 port_actions->registered_array = NULL;
2054 port_actions->registered_count = 0;
2055 }
2056
2057 return 0;
2058}
2059
2060static void
2061exec_port_actions_destroy(struct exec_port_actions *port_actions)
2062{
2063 if (port_actions->portwatch_array) {
2064 for (uint32_t i = 0; i < port_actions->portwatch_count; i++) {
2065 ipc_port_t port = NULL;
2066 if ((port = port_actions->portwatch_array[i]) != NULL) {
2067 ipc_port_release_send(port);
2068 }
2069 }
2070 kfree(port_actions->portwatch_array,
2071 port_actions->portwatch_count * sizeof(ipc_port_t *));
2072 }
2073
2074 if (port_actions->registered_array) {
2075 for (uint32_t i = 0; i < port_actions->registered_count; i++) {
2076 ipc_port_t port = NULL;
2077 if ((port = port_actions->registered_array[i]) != NULL) {
2078 ipc_port_release_send(port);
2079 }
2080 }
2081 kfree(port_actions->registered_array,
2082 port_actions->registered_count * sizeof(ipc_port_t *));
2083 }
2084}
2085
2086/*
2087 * exec_handle_port_actions
2088 *
2089 * Description: Go through the _posix_port_actions_t contents,
2090 * calling task_set_special_port, task_set_exception_ports
2091 * and/or audit_session_spawnjoin for the current task.
2092 *
2093 * Parameters: struct image_params * Image parameter block
2094 *
2095 * Returns: 0 Success
2096 * EINVAL Failure
2097 * ENOTSUP Illegal posix_spawn attr flag was set
2098 */
2099static errno_t
2100exec_handle_port_actions(struct image_params *imgp,
2101 struct exec_port_actions *actions)
2102{
2103 _posix_spawn_port_actions_t pacts = imgp->ip_px_spa;
2104#if CONFIG_AUDIT
2105 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
2106#endif
2107 _ps_port_action_t *act = NULL;
2108 task_t task = get_threadtask(imgp->ip_new_thread);
2109 ipc_port_t port = NULL;
2110 errno_t ret = 0;
2111 int i, portwatch_i = 0, registered_i = 0;
2112 kern_return_t kr;
2113 boolean_t task_has_watchport_boost = task_has_watchports(current_task());
2114 boolean_t in_exec = (imgp->ip_flags & IMGPF_EXEC);
2115 int ptrauth_task_port_count = 0;
2116 boolean_t suid_cred_specified = FALSE;
2117
2118 for (i = 0; i < pacts->pspa_count; i++) {
2119 act = &pacts->pspa_actions[i];
2120
2121 switch (act->port_type) {
2122 case PSPA_SPECIAL:
2123 case PSPA_EXCEPTION:
2124#if CONFIG_AUDIT
2125 case PSPA_AU_SESSION:
2126#endif
2127 break;
2128 case PSPA_IMP_WATCHPORTS:
2129 if (++actions->portwatch_count > TASK_MAX_WATCHPORT_COUNT) {
2130 ret = EINVAL;
2131 goto done;
2132 }
2133 break;
2134 case PSPA_REGISTERED_PORTS:
2135 if (++actions->registered_count > TASK_PORT_REGISTER_MAX) {
2136 ret = EINVAL;
2137 goto done;
2138 }
2139 break;
2140
2141 case PSPA_PTRAUTH_TASK_PORT:
2142 if (++ptrauth_task_port_count > 1) {
2143 ret = EINVAL;
2144 goto done;
2145 }
2146 break;
2147
2148 case PSPA_SUID_CRED:
2149 /* Only a single suid credential can be specified. */
2150 if (suid_cred_specified) {
2151 ret = EINVAL;
2152 goto done;
2153 }
2154 suid_cred_specified = TRUE;
2155 break;
2156
2157 default:
2158 ret = EINVAL;
2159 goto done;
2160 }
2161 }
2162
2163 if (actions->portwatch_count) {
2164 if (in_exec && task_has_watchport_boost) {
2165 ret = EINVAL;
2166 goto done;
2167 }
2168 actions->portwatch_array =
2169 kalloc(sizeof(ipc_port_t *) * actions->portwatch_count);
2170 if (actions->portwatch_array == NULL) {
2171 ret = ENOMEM;
2172 goto done;
2173 }
2174 bzero(actions->portwatch_array,
2175 sizeof(ipc_port_t *) * actions->portwatch_count);
2176 }
2177
2178 if (actions->registered_count) {
2179 actions->registered_array =
2180 kalloc(sizeof(ipc_port_t *) * actions->registered_count);
2181 if (actions->registered_array == NULL) {
2182 ret = ENOMEM;
2183 goto done;
2184 }
2185 bzero(actions->registered_array,
2186 sizeof(ipc_port_t *) * actions->registered_count);
2187 }
2188
2189 for (i = 0; i < pacts->pspa_count; i++) {
2190 act = &pacts->pspa_actions[i];
2191
2192 if (MACH_PORT_VALID(act->new_port)) {
2193 kr = ipc_object_copyin(get_task_ipcspace(current_task()),
2194 act->new_port, MACH_MSG_TYPE_COPY_SEND,
2195 (ipc_object_t *) &port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
2196
2197 if (kr != KERN_SUCCESS) {
2198 ret = EINVAL;
2199 goto done;
2200 }
2201 } else {
2202 /* it's NULL or DEAD */
2203 port = CAST_MACH_NAME_TO_PORT(act->new_port);
2204 }
2205
2206 switch (act->port_type) {
2207 case PSPA_SPECIAL:
2208 kr = task_set_special_port(task, act->which, port);
2209
2210 if (kr != KERN_SUCCESS) {
2211 ret = EINVAL;
2212 }
2213 break;
2214
2215 case PSPA_EXCEPTION:
2216 kr = task_set_exception_ports(task, act->mask, port,
2217 act->behavior, act->flavor);
2218 if (kr != KERN_SUCCESS) {
2219 ret = EINVAL;
2220 }
2221 break;
2222#if CONFIG_AUDIT
2223 case PSPA_AU_SESSION:
2224 ret = audit_session_spawnjoin(p, task, port);
2225 if (ret) {
2226 /* audit_session_spawnjoin() has already dropped the reference in case of error. */
2227 goto done;
2228 }
2229
2230 break;
2231#endif
2232 case PSPA_IMP_WATCHPORTS:
2233 if (actions->portwatch_array) {
2234 /* hold on to this till end of spawn */
2235 actions->portwatch_array[portwatch_i++] = port;
2236 } else {
2237 ipc_port_release_send(port);
2238 }
2239 break;
2240 case PSPA_REGISTERED_PORTS:
2241 /* hold on to this till end of spawn */
2242 actions->registered_array[registered_i++] = port;
2243 break;
2244
2245 case PSPA_PTRAUTH_TASK_PORT:
2246#if defined(HAS_APPLE_PAC)
2247 {
2248 task_t ptr_auth_task = convert_port_to_task(port);
2249
2250 if (ptr_auth_task == TASK_NULL) {
2251 ret = EINVAL;
2252 break;
2253 }
2254
2255 imgp->ip_inherited_shared_region_id =
2256 task_get_vm_shared_region_id_and_jop_pid(ptr_auth_task,
2257 &imgp->ip_inherited_jop_pid);
2258
2259 /* Deallocate task ref returned by convert_port_to_task */
2260 task_deallocate(ptr_auth_task);
2261 }
2262#endif /* HAS_APPLE_PAC */
2263
 2264 /* success path: consume the send right here (the common failure path below releases it) */
2265 ipc_port_release_send(port);
2266 break;
2267
2268 case PSPA_SUID_CRED:
2269 imgp->ip_sc_port = port;
2270 break;
2271
2272 default:
2273 ret = EINVAL;
2274 break;
2275 }
2276
2277 if (ret) {
2278 /* action failed, so release port resources */
2279 ipc_port_release_send(port);
2280 break;
2281 }
2282 }
2283
2284done:
2285 if (0 != ret) {
2286 DTRACE_PROC1(spawn__port__failure, mach_port_name_t, act->new_port);
2287 }
2288 return ret;
2289}
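
/*
 * Illustrative userspace counterpart to the PSPA_SPECIAL case above (a
 * sketch, not part of this file), using the public Apple extension in
 * <spawn.h>: a send right is installed as one of the child's task special
 * ports before it runs.
 */
#if 0
#include <mach/mach.h>
#include <spawn.h>

extern char **environ;

static int
spawn_with_bootstrap(const char *path, char *const argv[],
    mach_port_t bootstrap, pid_t *pidp)
{
	posix_spawnattr_t attr;
	int rc;

	posix_spawnattr_init(&attr);
	/* becomes a task_set_special_port() on the new task, as above */
	rc = posix_spawnattr_setspecialport_np(&attr, bootstrap,
	    TASK_BOOTSTRAP_PORT);
	if (rc == 0) {
		rc = posix_spawn(pidp, path, NULL, &attr, argv, environ);
	}
	posix_spawnattr_destroy(&attr);
	return rc;
}
#endif
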
2290
2291/*
2292 * exec_handle_file_actions
2293 *
2294 * Description: Go through the _posix_file_actions_t contents applying the
2295 * open, close, and dup2 operations to the open file table for
2296 * the current process.
2297 *
2298 * Parameters: struct image_params * Image parameter block
2299 *
2300 * Returns: 0 Success
2301 * ???
2302 *
2303 * Note: Actions are applied in the order specified, with the credential
2304 * of the parent process. This is done to permit the parent
2305 * process to utilize POSIX_SPAWN_RESETIDS to drop privilege in
2306 * the child following operations the child may in fact not be
2307 * normally permitted to perform.
2308 */
2309static int
2310exec_handle_file_actions(struct image_params *imgp, short psa_flags)
2311{
2312 int error = 0;
2313 int action;
2314 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
2315 _posix_spawn_file_actions_t px_sfap = imgp->ip_px_sfa;
 2316 int ival[2]; /* dummy retval for system calls */
2317#if CONFIG_AUDIT
2318 struct uthread *uthread = get_bsdthread_info(current_thread());
2319#endif
2320
2321 for (action = 0; action < px_sfap->psfa_act_count; action++) {
2322 _psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
2323
2324 switch (psfa->psfaa_type) {
2325 case PSFA_OPEN: {
2326 /*
2327 * Open is different, in that it requires the use of
2328 * a path argument, which is normally copied in from
2329 * user space; because of this, we have to support an
2330 * open from kernel space that passes an address space
2331 * context of UIO_SYSSPACE, and casts the address
2332 * argument to a user_addr_t.
2333 */
2334 char *bufp = NULL;
2335 struct vnode_attr *vap;
2336 struct nameidata *ndp;
2337 int mode = psfa->psfaa_openargs.psfao_mode;
2338 int origfd;
2339
2340 bufp = kheap_alloc(KHEAP_TEMP,
2341 sizeof(*vap) + sizeof(*ndp), Z_WAITOK | Z_ZERO);
2342 if (bufp == NULL) {
2343 error = ENOMEM;
2344 break;
2345 }
2346
2347 vap = (struct vnode_attr *) bufp;
2348 ndp = (struct nameidata *) (bufp + sizeof(*vap));
2349
2350 VATTR_INIT(vap);
2351 /* Mask off all but regular access permissions */
2352 mode = ((mode & ~p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT;
2353 VATTR_SET(vap, va_mode, mode & ACCESSPERMS);
2354
2355 AUDIT_SUBCALL_ENTER(OPEN, p, uthread);
2356
2357 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
2358 CAST_USER_ADDR_T(psfa->psfaa_openargs.psfao_path),
2359 imgp->ip_vfs_context);
2360
2361 error = open1(imgp->ip_vfs_context,
2362 ndp,
2363 psfa->psfaa_openargs.psfao_oflag,
2364 vap,
2365 fileproc_alloc_init, NULL,
2366 &origfd);
2367
2368 kheap_free(KHEAP_TEMP, bufp, sizeof(*vap) + sizeof(*ndp));
2369
2370 AUDIT_SUBCALL_EXIT(uthread, error);
2371
2372 /*
2373 * If there's an error, or we get the right fd by
2374 * accident, then drop out here. This is easier than
2375 * reworking all the open code to preallocate fd
2376 * slots, and internally taking one as an argument.
2377 */
2378 if (error || origfd == psfa->psfaa_filedes) {
2379 break;
2380 }
2381
2382 /*
2383 * If we didn't fall out from an error, we ended up
2384 * with the wrong fd; so now we've got to try to dup2
2385 * it to the right one.
2386 */
2387 AUDIT_SUBCALL_ENTER(DUP2, p, uthread);
2388 error = dup2(p, origfd, psfa->psfaa_filedes, ival);
2389 AUDIT_SUBCALL_EXIT(uthread, error);
2390 if (error) {
2391 break;
2392 }
2393
2394 /*
2395 * Finally, close the original fd.
2396 */
2397 AUDIT_SUBCALL_ENTER(CLOSE, p, uthread);
2398 error = close_nocancel(p, origfd);
2399 AUDIT_SUBCALL_EXIT(uthread, error);
2400 }
2401 break;
2402
2403 case PSFA_DUP2: {
2404 AUDIT_SUBCALL_ENTER(DUP2, p, uthread);
2405 error = dup2(p, psfa->psfaa_filedes,
2406 psfa->psfaa_dup2args.psfad_newfiledes, ival);
2407 AUDIT_SUBCALL_EXIT(uthread, error);
2408 }
2409 break;
2410
2411 case PSFA_FILEPORT_DUP2: {
2412 ipc_port_t port;
2413 kern_return_t kr;
2414 int origfd;
2415
2416 if (!MACH_PORT_VALID(psfa->psfaa_fileport)) {
2417 error = EINVAL;
2418 break;
2419 }
2420
2421 kr = ipc_object_copyin(get_task_ipcspace(current_task()),
2422 psfa->psfaa_fileport, MACH_MSG_TYPE_COPY_SEND,
2423 (ipc_object_t *) &port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
2424
2425 if (kr != KERN_SUCCESS) {
2426 error = EINVAL;
2427 break;
2428 }
2429
2430 error = fileport_makefd(p, port, 0, &origfd);
2431
2432 if (IPC_PORT_NULL != port) {
2433 ipc_port_release_send(port);
2434 }
2435
2436 if (error || origfd == psfa->psfaa_dup2args.psfad_newfiledes) {
2437 break;
2438 }
2439
2440 AUDIT_SUBCALL_ENTER(DUP2, p, uthread);
2441 error = dup2(p, origfd,
2442 psfa->psfaa_dup2args.psfad_newfiledes, ival);
2443 AUDIT_SUBCALL_EXIT(uthread, error);
2444 if (error) {
2445 break;
2446 }
2447
2448 AUDIT_SUBCALL_ENTER(CLOSE, p, uthread);
2449 error = close_nocancel(p, origfd);
2450 AUDIT_SUBCALL_EXIT(uthread, error);
2451 }
2452 break;
2453
2454 case PSFA_CLOSE: {
2455 AUDIT_SUBCALL_ENTER(CLOSE, p, uthread);
2456 error = close_nocancel(p, psfa->psfaa_filedes);
2457 AUDIT_SUBCALL_EXIT(uthread, error);
2458 }
2459 break;
2460
2461 case PSFA_INHERIT: {
2462 struct fileproc *fp;
2463
2464 /*
2465 * Check to see if the descriptor exists, and
2466 * ensure it's -not- marked as close-on-exec.
2467 *
2468 * Attempting to "inherit" a guarded fd will
 2469 * result in an error.
2470 */
2471
2472 proc_fdlock(p);
2473 if ((fp = fp_get_noref_locked(p, psfa->psfaa_filedes)) == NULL) {
2474 error = EBADF;
2475 } else if (fp_isguarded(fp, 0)) {
2476 error = fp_guard_exception(p, psfa->psfaa_filedes,
2477 fp, kGUARD_EXC_NOCLOEXEC);
2478 } else {
2479 p->p_fd->fd_ofileflags[psfa->psfaa_filedes] &= ~UF_EXCLOSE;
2480 error = 0;
2481 }
2482 proc_fdunlock(p);
2483 }
2484 break;
2485
2486 case PSFA_CHDIR: {
2487 /*
2488 * Chdir is different, in that it requires the use of
2489 * a path argument, which is normally copied in from
2490 * user space; because of this, we have to support a
2491 * chdir from kernel space that passes an address space
2492 * context of UIO_SYSSPACE, and casts the address
2493 * argument to a user_addr_t.
2494 */
2495 struct nameidata *nd;
2496 nd = kheap_alloc(KHEAP_TEMP, sizeof(*nd), Z_WAITOK | Z_ZERO);
2497 if (nd == NULL) {
2498 error = ENOMEM;
2499 break;
2500 }
2501
2502 AUDIT_SUBCALL_ENTER(CHDIR, p, uthread);
2503 NDINIT(nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
2504 CAST_USER_ADDR_T(psfa->psfaa_chdirargs.psfac_path),
2505 imgp->ip_vfs_context);
2506
2507 error = chdir_internal(p, imgp->ip_vfs_context, nd, 0);
2508 kheap_free(KHEAP_TEMP, nd, sizeof(*nd));
2509 AUDIT_SUBCALL_EXIT(uthread, error);
2510 }
2511 break;
2512
2513 case PSFA_FCHDIR: {
2514 struct fchdir_args fchdira;
2515
2516 fchdira.fd = psfa->psfaa_filedes;
2517
2518 AUDIT_SUBCALL_ENTER(FCHDIR, p, uthread);
2519 error = fchdir(p, &fchdira, ival);
2520 AUDIT_SUBCALL_EXIT(uthread, error);
2521 }
2522 break;
2523
2524 default:
2525 error = EINVAL;
2526 break;
2527 }
2528
2529 /* All file actions failures are considered fatal, per POSIX */
2530
2531 if (error) {
2532 if (PSFA_OPEN == psfa->psfaa_type) {
2533 DTRACE_PROC1(spawn__open__failure, uintptr_t,
2534 psfa->psfaa_openargs.psfao_path);
2535 } else {
2536 DTRACE_PROC1(spawn__fd__failure, int, psfa->psfaa_filedes);
2537 }
2538 break;
2539 }
2540 }
2541
2542 if (error != 0 || (psa_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) == 0) {
2543 return error;
2544 }
2545
2546 /*
2547 * If POSIX_SPAWN_CLOEXEC_DEFAULT is set, behave (during
2548 * this spawn only) as if "close on exec" is the default
2549 * disposition of all pre-existing file descriptors. In this case,
2550 * the list of file descriptors mentioned in the file actions
2551 * are the only ones that can be inherited, so mark them now.
2552 *
2553 * The actual closing part comes later, in fdexec().
2554 */
2555 proc_fdlock(p);
2556 for (action = 0; action < px_sfap->psfa_act_count; action++) {
2557 _psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
2558 int fd = psfa->psfaa_filedes;
2559
2560 switch (psfa->psfaa_type) {
2561 case PSFA_DUP2:
2562 case PSFA_FILEPORT_DUP2:
2563 fd = psfa->psfaa_dup2args.psfad_newfiledes;
2564 OS_FALLTHROUGH;
2565 case PSFA_OPEN:
2566 case PSFA_INHERIT:
2567 *fdflags(p, fd) |= UF_INHERIT;
2568 break;
2569
2570 case PSFA_CLOSE:
2571 case PSFA_CHDIR:
2572 case PSFA_FCHDIR:
2573 /*
2574 * Although PSFA_FCHDIR does have a file descriptor, it is not
2575 * *creating* one, thus we do not automatically mark it for
2576 * inheritance under POSIX_SPAWN_CLOEXEC_DEFAULT. A client that
2577 * wishes it to be inherited should use the PSFA_INHERIT action
2578 * explicitly.
2579 */
2580 break;
2581 }
2582 }
2583 proc_fdunlock(p);
2584
2585 return 0;
2586}
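
/*
 * Illustrative userspace counterpart to exec_handle_file_actions() above
 * (a sketch, not part of this file). The addopen/adddup2 actions map onto
 * the PSFA_* cases, and the two Apple extensions shown exercise the
 * POSIX_SPAWN_CLOEXEC_DEFAULT behavior handled at the end of the function.
 */
#if 0
#include <fcntl.h>
#include <spawn.h>

extern char **environ;

static int
spawn_with_files(const char *path, char *const argv[], int log_fd, pid_t *pidp)
{
	posix_spawn_file_actions_t fa;
	posix_spawnattr_t attr;
	int rc;

	posix_spawn_file_actions_init(&fa);
	posix_spawnattr_init(&attr);

	/* PSFA_OPEN: lands on fd 0 (open, then dup2 + close if needed) */
	posix_spawn_file_actions_addopen(&fa, 0, "/dev/null", O_RDONLY, 0);
	/* PSFA_DUP2: route the child's stdout/stderr to log_fd */
	posix_spawn_file_actions_adddup2(&fa, log_fd, 1);
	posix_spawn_file_actions_adddup2(&fa, log_fd, 2);
	/* PSFA_INHERIT: keep log_fd itself open despite the flag below */
	posix_spawn_file_actions_addinherit_np(&fa, log_fd);
	/* all other pre-existing fds are treated as close-on-exec */
	posix_spawnattr_setflags(&attr, POSIX_SPAWN_CLOEXEC_DEFAULT);

	rc = posix_spawn(pidp, path, &fa, &attr, argv, environ);
	posix_spawn_file_actions_destroy(&fa);
	posix_spawnattr_destroy(&attr);
	return rc;
}
#endif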
2587
2588#if CONFIG_MACF
2589/*
2590 * exec_spawnattr_getmacpolicyinfo
2591 */
2592void *
2593exec_spawnattr_getmacpolicyinfo(const void *macextensions, const char *policyname, size_t *lenp)
2594{
2595 const struct _posix_spawn_mac_policy_extensions *psmx = macextensions;
2596 int i;
2597
2598 if (psmx == NULL) {
2599 return NULL;
2600 }
2601
2602 for (i = 0; i < psmx->psmx_count; i++) {
2603 const _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
2604 if (strncmp(extension->policyname, policyname, sizeof(extension->policyname)) == 0) {
2605 if (lenp != NULL) {
2606 *lenp = (size_t)extension->datalen;
2607 }
2608 return extension->datap;
2609 }
2610 }
2611
2612 if (lenp != NULL) {
2613 *lenp = 0;
2614 }
2615 return NULL;
2616}
2617
2618static void
2619spawn_free_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args,
2620 _posix_spawn_mac_policy_extensions_t psmx, int count)
2621{
2622 if (psmx == NULL) {
2623 return;
2624 }
2625 for (int i = 0; i < count; i++) {
2626 _ps_mac_policy_extension_t *ext = &psmx->psmx_extensions[i];
2627 kheap_free(KHEAP_TEMP, ext->datap, (vm_size_t) ext->datalen);
2628 }
2629 kheap_free(KHEAP_TEMP, psmx, px_args->mac_extensions_size);
2630}
2631
2632static int
2633spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args,
2634 _posix_spawn_mac_policy_extensions_t *psmxp)
2635{
2636 _posix_spawn_mac_policy_extensions_t psmx = NULL;
2637 int error = 0;
2638 int copycnt = 0;
2639
2640 *psmxp = NULL;
2641
2642 if (px_args->mac_extensions_size < PS_MAC_EXTENSIONS_SIZE(1) ||
2643 px_args->mac_extensions_size > PAGE_SIZE) {
2644 error = EINVAL;
2645 goto bad;
2646 }
2647
2648 psmx = kheap_alloc(KHEAP_TEMP, px_args->mac_extensions_size, Z_WAITOK);
2649 if (psmx == NULL) {
2650 error = ENOMEM;
2651 goto bad;
2652 }
2653
2654 error = copyin(px_args->mac_extensions, psmx, px_args->mac_extensions_size);
2655 if (error) {
2656 goto bad;
2657 }
2658
2659 size_t extsize = PS_MAC_EXTENSIONS_SIZE(psmx->psmx_count);
2660 if (extsize == 0 || extsize > px_args->mac_extensions_size) {
2661 error = EINVAL;
2662 goto bad;
2663 }
2664
2665 for (int i = 0; i < psmx->psmx_count; i++) {
2666 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
2667 if (extension->datalen == 0 || extension->datalen > PAGE_SIZE) {
2668 error = EINVAL;
2669 goto bad;
2670 }
2671 }
2672
2673 for (copycnt = 0; copycnt < psmx->psmx_count; copycnt++) {
2674 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[copycnt];
2675 void *data = NULL;
2676
2677#if !__LP64__
2678 if (extension->data > UINT32_MAX) {
 error = EINVAL; /* pointer does not fit in a 32-bit user_addr_t */
 2679 goto bad;
2680 }
2681#endif
2682 data = kheap_alloc(KHEAP_TEMP, (vm_size_t) extension->datalen, Z_WAITOK);
2683 if (data == NULL) {
2684 error = ENOMEM;
2685 goto bad;
2686 }
2687 error = copyin((user_addr_t)extension->data, data, (size_t)extension->datalen);
2688 if (error) {
2689 kheap_free(KHEAP_TEMP, data, (vm_size_t) extension->datalen);
 2690 /* preserve the error from copyin() (typically EFAULT) */
2691 goto bad;
2692 }
2693 extension->datap = data;
2694 }
2695
2696 *psmxp = psmx;
2697 return 0;
2698
2699bad:
2700 spawn_free_macpolicyinfo(px_args, psmx, copycnt);
2701 return error;
2702}
2703#endif /* CONFIG_MACF */
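
/*
 * The copyin-then-validate idiom above recurs for the file action and port
 * action blobs in posix_spawn() below: copy the caller's buffer in whole,
 * recompute the expected size from the embedded count, and reject a
 * mismatch (for file and port actions the size must match exactly; the MAC
 * extension path above only requires the computed size not to exceed the
 * buffer). A distilled sketch, where validate_counted_blob and
 * size_for_count are hypothetical names for illustration only:
 */
#if 0
static int
validate_counted_blob(size_t user_size, uint32_t count,
    size_t (*size_for_count)(uint32_t))
{
	size_t expected = size_for_count(count);

	/* a zero result signals overflow inside the size macro */
	if (expected == 0 || expected != user_size) {
		return EINVAL;
	}
	return 0;
}
#endif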
2704
2705#if CONFIG_COALITIONS
2706static inline void
2707spawn_coalitions_release_all(coalition_t coal[COALITION_NUM_TYPES])
2708{
2709 for (int c = 0; c < COALITION_NUM_TYPES; c++) {
2710 if (coal[c]) {
2711 coalition_remove_active(coal[c]);
2712 coalition_release(coal[c]);
2713 }
2714 }
2715}
2716#endif
2717
2718#if CONFIG_PERSONAS
2719static int
2720spawn_validate_persona(struct _posix_spawn_persona_info *px_persona)
2721{
2722 int error = 0;
2723 struct persona *persona = NULL;
2724 int verify = px_persona->pspi_flags & POSIX_SPAWN_PERSONA_FLAGS_VERIFY;
2725
2726 if (!IOTaskHasEntitlement(current_task(), PERSONA_MGMT_ENTITLEMENT)) {
2727 return EPERM;
2728 }
2729
2730 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
2731 if (px_persona->pspi_ngroups > NGROUPS_MAX) {
2732 return EINVAL;
2733 }
2734 }
2735
2736 persona = persona_lookup(px_persona->pspi_id);
2737 if (!persona) {
2738 error = ESRCH;
2739 goto out;
2740 }
2741
2742 if (verify) {
2743 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) {
2744 if (px_persona->pspi_uid != persona_get_uid(persona)) {
2745 error = EINVAL;
2746 goto out;
2747 }
2748 }
2749 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) {
2750 if (px_persona->pspi_gid != persona_get_gid(persona)) {
2751 error = EINVAL;
2752 goto out;
2753 }
2754 }
2755 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
2756 size_t ngroups = 0;
2757 gid_t groups[NGROUPS_MAX];
2758
2759 if (persona_get_groups(persona, &ngroups, groups,
2760 px_persona->pspi_ngroups) != 0) {
2761 error = EINVAL;
2762 goto out;
2763 }
2764 if (ngroups != px_persona->pspi_ngroups) {
2765 error = EINVAL;
2766 goto out;
2767 }
2768 while (ngroups--) {
2769 if (px_persona->pspi_groups[ngroups] != groups[ngroups]) {
2770 error = EINVAL;
2771 goto out;
2772 }
2773 }
2774 if (px_persona->pspi_gmuid != persona_get_gmuid(persona)) {
2775 error = EINVAL;
2776 goto out;
2777 }
2778 }
2779 }
2780
2781out:
2782 if (persona) {
2783 persona_put(persona);
2784 }
2785
2786 return error;
2787}
2788
2789static int
2790spawn_persona_adopt(proc_t p, struct _posix_spawn_persona_info *px_persona)
2791{
2792 int ret;
2793 kauth_cred_t cred;
2794 struct persona *persona = NULL;
2795 int override = !!(px_persona->pspi_flags & POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE);
2796
2797 if (!override) {
2798 return persona_proc_adopt_id(p, px_persona->pspi_id, NULL);
2799 }
2800
2801 /*
2802 * we want to spawn into the given persona, but we want to override
2803 * the kauth with a different UID/GID combo
2804 */
2805 persona = persona_lookup(px_persona->pspi_id);
2806 if (!persona) {
2807 return ESRCH;
2808 }
2809
2810 cred = persona_get_cred(persona);
2811 if (!cred) {
2812 ret = EINVAL;
2813 goto out;
2814 }
2815
2816 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) {
2817 cred = kauth_cred_setresuid(cred,
2818 px_persona->pspi_uid,
2819 px_persona->pspi_uid,
2820 px_persona->pspi_uid,
2821 KAUTH_UID_NONE);
2822 }
2823
2824 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) {
2825 cred = kauth_cred_setresgid(cred,
2826 px_persona->pspi_gid,
2827 px_persona->pspi_gid,
2828 px_persona->pspi_gid);
2829 }
2830
2831 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
2832 cred = kauth_cred_setgroups(cred,
2833 px_persona->pspi_groups,
2834 px_persona->pspi_ngroups,
2835 px_persona->pspi_gmuid);
2836 }
2837
2838 ret = persona_proc_adopt(p, persona, cred);
2839
2840out:
2841 persona_put(persona);
2842 return ret;
2843}
2844#endif
2845
2846#if __arm64__
2847extern int legacy_footprint_entitlement_mode;
2848static inline void
2849proc_legacy_footprint_entitled(proc_t p, task_t task)
2850{
2851#pragma unused(p)
2852 boolean_t legacy_footprint_entitled;
2853
2854 switch (legacy_footprint_entitlement_mode) {
2855 case LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE:
2856 /* the entitlement is ignored */
2857 break;
2858 case LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT:
2859 /* the entitlement grants iOS11 legacy accounting */
2860 legacy_footprint_entitled = IOTaskHasEntitlement(task,
2861 "com.apple.private.memory.legacy_footprint");
2862 if (legacy_footprint_entitled) {
2863 task_set_legacy_footprint(task);
2864 }
2865 break;
2866 case LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE:
2867 /* the entitlement grants a footprint limit increase */
2868 legacy_footprint_entitled = IOTaskHasEntitlement(task,
2869 "com.apple.private.memory.legacy_footprint");
2870 if (legacy_footprint_entitled) {
2871 task_set_extra_footprint_limit(task);
2872 }
2873 break;
2874 default:
2875 break;
2876 }
2877}
2878
2879static inline void
2880proc_ios13extended_footprint_entitled(proc_t p, task_t task)
2881{
2882#pragma unused(p)
2883 boolean_t ios13extended_footprint_entitled;
2884
2885 /* the entitlement grants a footprint limit increase */
2886 ios13extended_footprint_entitled = IOTaskHasEntitlement(task,
2887 "com.apple.developer.memory.ios13extended_footprint");
2888 if (ios13extended_footprint_entitled) {
2889 task_set_ios13extended_footprint_limit(task);
2890 }
2891}
2892static inline void
2893proc_increased_memory_limit_entitled(proc_t p, task_t task)
2894{
2895 static const char kIncreasedMemoryLimitEntitlement[] = "com.apple.developer.kernel.increased-memory-limit";
2896 bool entitled = false;
2897
2898 entitled = IOTaskHasEntitlement(task, kIncreasedMemoryLimitEntitlement);
2899 if (entitled) {
2900 memorystatus_act_on_entitled_task_limit(p);
2901 }
2902}
2903
2904/*
2905 * Check for any of the various entitlements that permit a higher
2906 * task footprint limit or alternate accounting and apply them.
2907 */
2908static inline void
2909proc_footprint_entitlement_hacks(proc_t p, task_t task)
2910{
2911 proc_legacy_footprint_entitled(p, task);
2912 proc_ios13extended_footprint_entitled(p, task);
2913 proc_increased_memory_limit_entitled(p, task);
2914}
2915#endif /* __arm64__ */
2916
2917#if CONFIG_MACF
2918/*
2919 * Processes with certain entitlements are granted a jumbo-size VM map.
2920 */
2921static inline void
2922proc_apply_jit_and_jumbo_va_policies(proc_t p, task_t task)
2923{
2924 bool jit_entitled;
2925 jit_entitled = (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0);
2926 if (jit_entitled || (IOTaskHasEntitlement(task,
2927 "com.apple.developer.kernel.extended-virtual-addressing"))) {
2928 vm_map_set_jumbo(get_task_map(task));
2929 if (jit_entitled) {
2930 vm_map_set_jit_entitled(get_task_map(task));
2931 }
2932 }
2933}
2934#endif /* CONFIG_MACF */
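
/*
 * Illustrative userspace counterpart to the MAP_JIT policy check above (a
 * sketch, not part of this file): a process whose entitlements satisfy
 * mac_proc_check_map_anon() can request anonymous RWX memory with MAP_JIT.
 */
#if 0
#include <sys/mman.h>

static void *
alloc_jit_region(size_t size)
{
	/* fails for a process without the JIT entitlement */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
	    MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
	return (p == MAP_FAILED) ? NULL : p;
}
#endif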
2935
2936/*
2937 * Apply a modification on the proc's kauth cred until it converges.
2938 *
2939 * `update` consumes its argument to return a new kauth cred.
2940 */
2941static void
2942apply_kauth_cred_update(proc_t p,
2943 kauth_cred_t (^update)(kauth_cred_t orig_cred))
2944{
2945 kauth_cred_t my_cred, my_new_cred;
2946
2947 my_cred = kauth_cred_proc_ref(p);
2948 for (;;) {
2949 my_new_cred = update(my_cred);
2950 if (my_cred == my_new_cred) {
2951 kauth_cred_unref(&my_new_cred);
2952 break;
2953 }
2954
2955 /* try update cred on proc */
2956 proc_ucred_lock(p);
2957
2958 if (p->p_ucred == my_cred) {
2959 /* base pointer didn't change, donate our ref */
2960 p->p_ucred = my_new_cred;
2961 PROC_UPDATE_CREDS_ONPROC(p);
2962 proc_ucred_unlock(p);
2963
2964 /* drop p->p_ucred reference */
2965 kauth_cred_unref(&my_cred);
2966 break;
2967 }
2968
2969 /* base pointer changed, retry */
2970 my_cred = p->p_ucred;
2971 kauth_cred_ref(my_cred);
2972 proc_ucred_unlock(p);
2973
2974 kauth_cred_unref(&my_new_cred);
2975 }
2976}
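
/*
 * Usage sketch: the POSIX_SPAWN_RESETIDS path in posix_spawn() below calls
 * this helper with a block that derives a new credential; returning the
 * input credential unchanged terminates the retry loop without a swap.
 */
#if 0
	apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
		return kauth_cred_setuidgid(my_cred,
		    kauth_cred_getruid(my_cred),
		    kauth_cred_getrgid(my_cred));
	});
#endif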
2977
2978static int
2979spawn_posix_cred_adopt(proc_t p,
2980 struct _posix_spawn_posix_cred_info *px_pcred_info)
2981{
2982 int error = 0;
2983
2984 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GID) {
2985 struct setgid_args args = {
2986 .gid = px_pcred_info->pspci_gid,
2987 };
2988 error = setgid(p, &args, NULL);
2989 if (error) {
2990 return error;
2991 }
2992 }
2993
2994 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GROUPS) {
2995 error = setgroups_internal(p,
2996 px_pcred_info->pspci_ngroups,
2997 px_pcred_info->pspci_groups,
2998 px_pcred_info->pspci_gmuid);
2999 if (error) {
3000 return error;
3001 }
3002 }
3003
3004 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_UID) {
3005 struct setuid_args args = {
3006 .uid = px_pcred_info->pspci_uid,
3007 };
3008 error = setuid(p, &args, NULL);
3009 if (error) {
3010 return error;
3011 }
3012 }
3013 return 0;
3014}
3015
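/*
 * Illustrative userspace view of the entry point below (a sketch, not part
 * of this file): posix_spawn(2) normally creates a new child, but with the
 * Apple extension POSIX_SPAWN_SETEXEC it behaves like execve(2) with
 * options, replacing the calling image.
 */
#if 0
#include <spawn.h>

extern char **environ;

static int
exec_with_options(const char *path, char *const argv[])
{
	posix_spawnattr_t attr;

	posix_spawnattr_init(&attr);
	/* replace the current image instead of creating a child */
	posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	/* on success this call does not return */
	return posix_spawn(NULL, path, NULL, &attr, argv, environ);
}
#endif
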
3016/*
3017 * posix_spawn
3018 *
3019 * Parameters: uap->pid Pointer to pid return area
3020 * uap->fname File name to exec
3021 * uap->argp Argument list
3022 * uap->envp Environment list
3023 *
3024 * Returns: 0 Success
3025 * EINVAL Invalid argument
3026 * ENOTSUP Not supported
3027 * ENOEXEC Executable file format error
3028 * exec_activate_image:EINVAL Invalid argument
3029 * exec_activate_image:EACCES Permission denied
3030 * exec_activate_image:EINTR Interrupted function
3031 * exec_activate_image:ENOMEM Not enough space
3032 * exec_activate_image:EFAULT Bad address
3033 * exec_activate_image:ENAMETOOLONG Filename too long
3034 * exec_activate_image:ENOEXEC Executable file format error
3035 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
3036 * exec_activate_image:EAUTH Image decryption failed
3037 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
3038 * exec_activate_image:???
3039 * mac_execve_enter:???
3040 *
3041 * TODO: Expect to need __mac_posix_spawn() at some point...
3042 * Handle posix_spawnattr_t
3043 * Handle posix_spawn_file_actions_t
3044 */
3045int
3046posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval)
3047{
3048 proc_t p = ap; /* quiet bogus GCC vfork() warning */
3049 user_addr_t pid = uap->pid;
3050 int ival[2]; /* dummy retval for setpgid() */
3051 char *bufp = NULL;
3052 char *subsystem_root_path = NULL;
3053 struct image_params *imgp;
3054 struct vnode_attr *vap;
3055 struct vnode_attr *origvap;
 3056 struct uthread *uthread = 0; /* compiler complains if not set to 0 */
3057 int error, sig;
3058 int is_64 = IS_64BIT_PROCESS(p);
3059 struct vfs_context context;
3060 struct user__posix_spawn_args_desc px_args;
3061 struct _posix_spawnattr px_sa;
3062 _posix_spawn_file_actions_t px_sfap = NULL;
3063 _posix_spawn_port_actions_t px_spap = NULL;
3064 struct __kern_sigaction vec;
3065 boolean_t spawn_no_exec = FALSE;
3066 boolean_t proc_transit_set = TRUE;
3067 boolean_t exec_done = FALSE;
3068 struct exec_port_actions port_actions = { };
3069 vm_size_t px_sa_offset = offsetof(struct _posix_spawnattr, psa_ports);
3070 task_t old_task = current_task();
3071 task_t new_task = NULL;
3072 boolean_t should_release_proc_ref = FALSE;
3073 void *inherit = NULL;
3074#if CONFIG_PERSONAS
3075 struct _posix_spawn_persona_info *px_persona = NULL;
3076#endif
3077 struct _posix_spawn_posix_cred_info *px_pcred_info = NULL;
3078
3079 /*
3080 * Allocate a big chunk for locals instead of using stack since these
3081 * structures are pretty big.
3082 */
3083 bufp = kheap_alloc(KHEAP_TEMP,
3084 sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap), Z_WAITOK | Z_ZERO);
3085 imgp = (struct image_params *) bufp;
3086 if (bufp == NULL) {
3087 error = ENOMEM;
3088 goto bad;
3089 }
3090 vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
3091 origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));
3092
3093 /* Initialize the common data in the image_params structure */
3094 imgp->ip_user_fname = uap->path;
3095 imgp->ip_user_argv = uap->argv;
3096 imgp->ip_user_envv = uap->envp;
3097 imgp->ip_vattr = vap;
3098 imgp->ip_origvattr = origvap;
3099 imgp->ip_vfs_context = &context;
3100 imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE);
3101 imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
3102 imgp->ip_mac_return = 0;
3103 imgp->ip_px_persona = NULL;
3104 imgp->ip_px_pcred_info = NULL;
3105 imgp->ip_cs_error = OS_REASON_NULL;
3106 imgp->ip_simulator_binary = IMGPF_SB_DEFAULT;
3107 imgp->ip_subsystem_root_path = NULL;
3108 imgp->ip_inherited_shared_region_id = NULL;
3109 imgp->ip_inherited_jop_pid = 0;
3110
3111 if (uap->adesc != USER_ADDR_NULL) {
3112 if (is_64) {
3113 error = copyin(uap->adesc, &px_args, sizeof(px_args));
3114 } else {
3115 struct user32__posix_spawn_args_desc px_args32;
3116
3117 error = copyin(uap->adesc, &px_args32, sizeof(px_args32));
3118
3119 /*
3120 * Convert arguments descriptor from external 32 bit
3121 * representation to internal 64 bit representation
3122 */
3123 px_args.attr_size = px_args32.attr_size;
3124 px_args.attrp = CAST_USER_ADDR_T(px_args32.attrp);
3125 px_args.file_actions_size = px_args32.file_actions_size;
3126 px_args.file_actions = CAST_USER_ADDR_T(px_args32.file_actions);
3127 px_args.port_actions_size = px_args32.port_actions_size;
3128 px_args.port_actions = CAST_USER_ADDR_T(px_args32.port_actions);
3129 px_args.mac_extensions_size = px_args32.mac_extensions_size;
3130 px_args.mac_extensions = CAST_USER_ADDR_T(px_args32.mac_extensions);
3131 px_args.coal_info_size = px_args32.coal_info_size;
3132 px_args.coal_info = CAST_USER_ADDR_T(px_args32.coal_info);
3133 px_args.persona_info_size = px_args32.persona_info_size;
3134 px_args.persona_info = CAST_USER_ADDR_T(px_args32.persona_info);
3135 px_args.posix_cred_info_size = px_args32.posix_cred_info_size;
3136 px_args.posix_cred_info = CAST_USER_ADDR_T(px_args32.posix_cred_info);
3137 px_args.subsystem_root_path_size = px_args32.subsystem_root_path_size;
3138 px_args.subsystem_root_path = CAST_USER_ADDR_T(px_args32.subsystem_root_path);
3139 }
3140 if (error) {
3141 goto bad;
3142 }
3143
3144 if (px_args.attr_size != 0) {
3145 /*
3146 * We are not copying the port_actions pointer,
3147 * because we already have it from px_args.
3148 * This is a bit fragile: <rdar://problem/16427422>
3149 */
3150
3151 if ((error = copyin(px_args.attrp, &px_sa, px_sa_offset)) != 0) {
3152 goto bad;
3153 }
3154
3155 bzero((void *)((unsigned long) &px_sa + px_sa_offset), sizeof(px_sa) - px_sa_offset );
3156
3157 imgp->ip_px_sa = &px_sa;
3158 }
3159 if (px_args.file_actions_size != 0) {
3160 /* Limit file_actions to allowed number of open files */
3161 rlim_t maxfa = (p->p_limit ? MIN(proc_limitgetcur(p, RLIMIT_NOFILE, TRUE), maxfilesperproc) : NOFILE);
3162 size_t maxfa_size = PSF_ACTIONS_SIZE(maxfa);
3163 if (px_args.file_actions_size < PSF_ACTIONS_SIZE(1) ||
3164 maxfa_size == 0 || px_args.file_actions_size > maxfa_size) {
3165 error = EINVAL;
3166 goto bad;
3167 }
3168
3169 px_sfap = kheap_alloc(KHEAP_TEMP,
3170 px_args.file_actions_size, Z_WAITOK);
3171 if (px_sfap == NULL) {
3172 error = ENOMEM;
3173 goto bad;
3174 }
3175 imgp->ip_px_sfa = px_sfap;
3176
3177 if ((error = copyin(px_args.file_actions, px_sfap,
3178 px_args.file_actions_size)) != 0) {
3179 goto bad;
3180 }
3181
3182 /* Verify that the action count matches the struct size */
3183 size_t psfsize = PSF_ACTIONS_SIZE(px_sfap->psfa_act_count);
3184 if (psfsize == 0 || psfsize != px_args.file_actions_size) {
3185 error = EINVAL;
3186 goto bad;
3187 }
3188 }
3189 if (px_args.port_actions_size != 0) {
3190 /* Limit port_actions to one page of data */
3191 if (px_args.port_actions_size < PS_PORT_ACTIONS_SIZE(1) ||
3192 px_args.port_actions_size > PAGE_SIZE) {
3193 error = EINVAL;
3194 goto bad;
3195 }
3196
3197 px_spap = kheap_alloc(KHEAP_TEMP,
3198 px_args.port_actions_size, Z_WAITOK);
3199 if (px_spap == NULL) {
3200 error = ENOMEM;
3201 goto bad;
3202 }
3203 imgp->ip_px_spa = px_spap;
3204
3205 if ((error = copyin(px_args.port_actions, px_spap,
3206 px_args.port_actions_size)) != 0) {
3207 goto bad;
3208 }
3209
3210 /* Verify that the action count matches the struct size */
3211 size_t pasize = PS_PORT_ACTIONS_SIZE(px_spap->pspa_count);
3212 if (pasize == 0 || pasize != px_args.port_actions_size) {
3213 error = EINVAL;
3214 goto bad;
3215 }
3216 }
3217#if CONFIG_PERSONAS
3218 /* copy in the persona info */
3219 if (px_args.persona_info_size != 0 && px_args.persona_info != 0) {
3220 /* for now, we need the exact same struct in user space */
3221 if (px_args.persona_info_size != sizeof(*px_persona)) {
3222 error = ERANGE;
3223 goto bad;
3224 }
3225
3226 px_persona = kheap_alloc(KHEAP_TEMP,
3227 px_args.persona_info_size, Z_WAITOK);
3228 if (px_persona == NULL) {
3229 error = ENOMEM;
3230 goto bad;
3231 }
3232 imgp->ip_px_persona = px_persona;
3233
3234 if ((error = copyin(px_args.persona_info, px_persona,
3235 px_args.persona_info_size)) != 0) {
3236 goto bad;
3237 }
3238 if ((error = spawn_validate_persona(px_persona)) != 0) {
3239 goto bad;
3240 }
3241 }
3242#endif
3243 /* copy in the posix cred info */
3244 if (px_args.posix_cred_info_size != 0 && px_args.posix_cred_info != 0) {
3245 /* for now, we need the exact same struct in user space */
3246 if (px_args.posix_cred_info_size != sizeof(*px_pcred_info)) {
3247 error = ERANGE;
3248 goto bad;
3249 }
3250
3251 if (!kauth_cred_issuser(kauth_cred_get())) {
3252 error = EPERM;
3253 goto bad;
3254 }
3255
3256 px_pcred_info = kheap_alloc(KHEAP_TEMP,
3257 px_args.posix_cred_info_size, Z_WAITOK);
3258 if (px_pcred_info == NULL) {
3259 error = ENOMEM;
3260 goto bad;
3261 }
3262 imgp->ip_px_pcred_info = px_pcred_info;
3263
3264 if ((error = copyin(px_args.posix_cred_info, px_pcred_info,
3265 px_args.posix_cred_info_size)) != 0) {
3266 goto bad;
3267 }
3268
3269 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GROUPS) {
3270 if (px_pcred_info->pspci_ngroups > NGROUPS_MAX) {
3271 error = EINVAL;
3272 goto bad;
3273 }
3274 }
3275 }
3276#if CONFIG_MACF
3277 if (px_args.mac_extensions_size != 0) {
3278 if ((error = spawn_copyin_macpolicyinfo(&px_args, (_posix_spawn_mac_policy_extensions_t *)&imgp->ip_px_smpx)) != 0) {
3279 goto bad;
3280 }
3281 }
3282#endif /* CONFIG_MACF */
3283 if ((px_args.subsystem_root_path_size > 0) && (px_args.subsystem_root_path_size <= MAXPATHLEN)) {
3284 /*
3285 * If a valid-looking subsystem root has been
3286 * specified...
3287 */
3288 if (IOTaskHasEntitlement(old_task, SPAWN_SUBSYSTEM_ROOT_ENTITLEMENT)) {
3289 /*
3290 * ...AND the parent has the entitlement, copy
3291 * the subsystem root path in.
3292 */
3293 subsystem_root_path = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
3294
3295 if (subsystem_root_path == NULL) {
3296 error = ENOMEM;
3297 goto bad;
3298 }
3299
3300 if ((error = copyin(px_args.subsystem_root_path, subsystem_root_path, px_args.subsystem_root_path_size))) {
3301 goto bad;
3302 }
3303
3304 /* Paranoia */
3305 subsystem_root_path[px_args.subsystem_root_path_size - 1] = 0;
3306 }
3307 }
3308 }
3309
3310 /* set uthread to parent */
3311 uthread = get_bsdthread_info(current_thread());
3312
3313 /*
3314 * <rdar://6640530>; this does not result in a behaviour change
3315 * relative to Leopard, so there should not be any existing code
3316 * which depends on it.
3317 */
3318 if (uthread->uu_flag & UT_VFORK) {
3319 error = EINVAL;
3320 goto bad;
3321 }
3322
3323 if (imgp->ip_px_sa != NULL) {
3324 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
3325 if ((psa->psa_options & PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS) == PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS) {
3326 imgp->ip_flags |= IMGPF_PLUGIN_HOST_DISABLE_A_KEYS;
3327 }
3328
3329 if ((error = exec_validate_spawnattr_policy(psa->psa_apptype)) != 0) {
3330 goto bad;
3331 }
3332 }
3333
3334 /*
3335 * If we don't have the extension flag that turns "posix_spawn()"
3336 * into "execve() with options", then we will be creating a new
3337 * process which does not inherit memory from the parent process,
3338 * which is one of the most expensive things about using fork()
3339 * and execve().
3340 */
3341 if (imgp->ip_px_sa == NULL || !(px_sa.psa_flags & POSIX_SPAWN_SETEXEC)) {
3342 /* Set the new task's coalition, if it is requested. */
3343 coalition_t coal[COALITION_NUM_TYPES] = { COALITION_NULL };
3344#if CONFIG_COALITIONS
3345 int i, ncoals;
3346 kern_return_t kr = KERN_SUCCESS;
3347 struct _posix_spawn_coalition_info coal_info;
3348 int coal_role[COALITION_NUM_TYPES];
3349
3350 if (imgp->ip_px_sa == NULL || !px_args.coal_info) {
3351 goto do_fork1;
3352 }
3353
3354 memset(&coal_info, 0, sizeof(coal_info));
3355
3356 if (px_args.coal_info_size > sizeof(coal_info)) {
3357 px_args.coal_info_size = sizeof(coal_info);
3358 }
3359 error = copyin(px_args.coal_info,
3360 &coal_info, px_args.coal_info_size);
3361 if (error != 0) {
3362 goto bad;
3363 }
3364
3365 ncoals = 0;
3366 for (i = 0; i < COALITION_NUM_TYPES; i++) {
3367 uint64_t cid = coal_info.psci_info[i].psci_id;
3368 if (cid != 0) {
3369 /*
3370 * don't allow tasks which are not in a
3371 * privileged coalition to spawn processes
3372 * into coalitions other than their own
3373 */
3374 if (!task_is_in_privileged_coalition(p->task, i) &&
3375 !IOTaskHasEntitlement(p->task, COALITION_SPAWN_ENTITLEMENT)) {
 3376 coal_dbg("ERROR: %d not in privileged "
3377 "coalition of type %d",
3378 p->p_pid, i);
3379 spawn_coalitions_release_all(coal);
3380 error = EPERM;
3381 goto bad;
3382 }
3383
3384 coal_dbg("searching for coalition id:%llu", cid);
3385 /*
3386 * take a reference and activation on the
3387 * coalition to guard against free-while-spawn
3388 * races
3389 */
3390 coal[i] = coalition_find_and_activate_by_id(cid);
3391 if (coal[i] == COALITION_NULL) {
3392 coal_dbg("could not find coalition id:%llu "
3393 "(perhaps it has been terminated or reaped)", cid);
3394 /*
3395 * release any other coalition's we
3396 * may have a reference to
3397 */
3398 spawn_coalitions_release_all(coal);
3399 error = ESRCH;
3400 goto bad;
3401 }
3402 if (coalition_type(coal[i]) != i) {
3403 coal_dbg("coalition with id:%lld is not of type:%d"
3404 " (it's type:%d)", cid, i, coalition_type(coal[i]));
 spawn_coalitions_release_all(coal); /* drop refs/activations taken so far */
 3405 error = ESRCH;
3406 goto bad;
3407 }
3408 coal_role[i] = coal_info.psci_info[i].psci_role;
3409 ncoals++;
3410 }
3411 }
3412 if (ncoals < COALITION_NUM_TYPES) {
3413 /*
3414 * If the user is attempting to spawn into a subset of
3415 * the known coalition types, then make sure they have
3416 * _at_least_ specified a resource coalition. If not,
3417 * the following fork1() call will implicitly force an
3418 * inheritance from 'p' and won't actually spawn the
3419 * new task into the coalitions the user specified.
3420 * (also the call to coalitions_set_roles will panic)
3421 */
3422 if (coal[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
3423 spawn_coalitions_release_all(coal);
3424 error = EINVAL;
3425 goto bad;
3426 }
3427 }
3428do_fork1:
3429#endif /* CONFIG_COALITIONS */
3430
3431 /*
3432 * note that this will implicitly inherit the
3433 * caller's persona (if it exists)
3434 */
3435 error = fork1(p, &imgp->ip_new_thread, PROC_CREATE_SPAWN, coal);
3436 /* returns a thread and task reference */
3437
3438 if (error == 0) {
3439 new_task = get_threadtask(imgp->ip_new_thread);
3440 }
3441#if CONFIG_COALITIONS
3442 /* set the roles of this task within each given coalition */
3443 if (error == 0) {
3444 kr = coalitions_set_roles(coal, new_task, coal_role);
3445 if (kr != KERN_SUCCESS) {
3446 error = EINVAL;
3447 }
3448 if (kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_COALITION,
3449 MACH_COALITION_ADOPT))) {
3450 for (i = 0; i < COALITION_NUM_TYPES; i++) {
3451 if (coal[i] != COALITION_NULL) {
3452 /*
3453 * On 32-bit targets, uniqueid
3454 * will get truncated to 32 bits
3455 */
3456 KDBG_RELEASE(MACHDBG_CODE(
3457 DBG_MACH_COALITION,
3458 MACH_COALITION_ADOPT),
3459 coalition_id(coal[i]),
3460 get_task_uniqueid(new_task));
3461 }
3462 }
3463 }
3464 }
3465
3466 /* drop our references and activations - fork1() now holds them */
3467 spawn_coalitions_release_all(coal);
3468#endif /* CONFIG_COALITIONS */
3469 if (error != 0) {
3470 goto bad;
3471 }
3472 imgp->ip_flags |= IMGPF_SPAWN; /* spawn w/o exec */
3473 spawn_no_exec = TRUE; /* used in later tests */
3474 } else {
3475 /*
3476 * For execve case, create a new task and thread
3477 * which points to current_proc. The current_proc will point
3478 * to the new task after image activation and proc ref drain.
3479 *
3480 * proc (current_proc) <----- old_task (current_task)
3481 * ^ | ^
3482 * | | |
3483 * | ----------------------------------
3484 * |
3485 * --------- new_task (task marked as TF_EXEC_COPY)
3486 *
3487 * After image activation, the proc will point to the new task
3488 * and would look like following.
3489 *
3490 * proc (current_proc) <----- old_task (current_task, marked as TPF_DID_EXEC)
3491 * ^ |
3492 * | |
3493 * | ----------> new_task
3494 * | |
3495 * -----------------
3496 *
3497 * During exec any transition from new_task -> proc is fine, but don't allow
3498 * transition from proc->task, since it will modify old_task.
3499 */
3500 imgp->ip_new_thread = fork_create_child(old_task,
3501 NULL,
3502 p,
3503 FALSE,
3504 p->p_flag & P_LP64,
3505 task_get_64bit_data(old_task),
3506 TRUE);
3507 /* task and thread ref returned by fork_create_child */
3508 if (imgp->ip_new_thread == NULL) {
3509 error = ENOMEM;
3510 goto bad;
3511 }
3512
3513 new_task = get_threadtask(imgp->ip_new_thread);
3514 imgp->ip_flags |= IMGPF_EXEC;
3515 }
3516
3517 if (spawn_no_exec) {
3518 p = (proc_t)get_bsdthreadtask_info(imgp->ip_new_thread);
3519
3520 /*
3521 * We had to wait until this point before firing the
3522 * proc:::create probe, otherwise p would not point to the
3523 * child process.
3524 */
3525 DTRACE_PROC1(create, proc_t, p);
3526 }
3527 assert(p != NULL);
3528
3529 if (subsystem_root_path) {
3530 /* If a subsystem root was specified, swap it in */
3531 char * old_subsystem_root_path = p->p_subsystem_root_path;
3532 p->p_subsystem_root_path = subsystem_root_path;
3533 subsystem_root_path = old_subsystem_root_path;
3534 }
3535
3536 /* We'll need the subsystem root for setting up Apple strings */
3537 imgp->ip_subsystem_root_path = p->p_subsystem_root_path;
3538
3539 context.vc_thread = imgp->ip_new_thread;
3540 context.vc_ucred = p->p_ucred; /* XXX must NOT be kauth_cred_get() */
3541
3542 /*
3543 * Post fdcopy(), pre exec_handle_sugid() - this is where we want
3544 * to handle the file_actions. Since vfork() also ends up setting
3545 * us into the parent process group, and saved off the signal flags,
3546 * this is also where we want to handle the spawn flags.
3547 */
3548
3549 /* Has spawn file actions? */
3550 if (imgp->ip_px_sfa != NULL) {
3551 /*
3552 * The POSIX_SPAWN_CLOEXEC_DEFAULT flag
3553 * is handled in exec_handle_file_actions().
3554 */
3555#if CONFIG_AUDIT
3556 /*
3557 * The file actions auditing can overwrite the upath of
3558 * AUE_POSIX_SPAWN audit record. Save the audit record.
3559 */
3560 struct kaudit_record *save_uu_ar = uthread->uu_ar;
3561 uthread->uu_ar = NULL;
3562#endif
3563 error = exec_handle_file_actions(imgp,
3564 imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0);
3565#if CONFIG_AUDIT
3566 /* Restore the AUE_POSIX_SPAWN audit record. */
3567 uthread->uu_ar = save_uu_ar;
3568#endif
3569 if (error != 0) {
3570 goto bad;
3571 }
3572 }
3573
3574 /* Has spawn port actions? */
3575 if (imgp->ip_px_spa != NULL) {
3576#if CONFIG_AUDIT
3577 /*
3578 * Do the same for the port actions as we did for the file
3579 * actions. Save the AUE_POSIX_SPAWN audit record.
3580 */
3581 struct kaudit_record *save_uu_ar = uthread->uu_ar;
3582 uthread->uu_ar = NULL;
3583#endif
3584 error = exec_handle_port_actions(imgp, &port_actions);
3585#if CONFIG_AUDIT
3586 /* Restore the AUE_POSIX_SPAWN audit record. */
3587 uthread->uu_ar = save_uu_ar;
3588#endif
3589 if (error != 0) {
3590 goto bad;
3591 }
3592 }
3593
3594 /* Has spawn attr? */
3595 if (imgp->ip_px_sa != NULL) {
3596 /*
3597 * Reset UID/GID to parent's RUID/RGID; This works only
3598 * because the operation occurs *after* the vfork() and
3599 * before the call to exec_handle_sugid() by the image
3600 * activator called from exec_activate_image(). POSIX
3601 * requires that any setuid/setgid bits on the process
3602 * image will take precedence over the spawn attributes
3603 * (re)setting them.
3604 *
3605 * Modifications to p_ucred must be guarded using the
3606 * proc's ucred lock. This prevents others from accessing
3607 * a garbage credential.
3608 */
3609 if (px_sa.psa_flags & POSIX_SPAWN_RESETIDS) {
3610 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred){
3611 return kauth_cred_setuidgid(my_cred,
3612 kauth_cred_getruid(my_cred),
3613 kauth_cred_getrgid(my_cred));
3614 });
3615 }
3616
3617 if (imgp->ip_px_pcred_info) {
3618 if (!spawn_no_exec) {
3619 error = ENOTSUP;
3620 goto bad;
3621 }
3622
3623 error = spawn_posix_cred_adopt(p, imgp->ip_px_pcred_info);
3624 if (error != 0) {
3625 goto bad;
3626 }
3627 }
3628
3629#if CONFIG_PERSONAS
3630 if (imgp->ip_px_persona != NULL) {
3631 if (!spawn_no_exec) {
3632 error = ENOTSUP;
3633 goto bad;
3634 }
3635
3636 /*
3637 * If we were asked to spawn a process into a new persona,
3638 * do the credential switch now (which may override the UID/GID
3639 * inherit done just above). It's important to do this switch
3640 * before image activation both for reasons stated above, and
3641 * to ensure that the new persona has access to the image/file
3642 * being executed.
3643 */
3644 error = spawn_persona_adopt(p, imgp->ip_px_persona);
3645 if (error != 0) {
3646 goto bad;
3647 }
3648 }
3649#endif /* CONFIG_PERSONAS */
3650#if !SECURE_KERNEL
3651 /*
3652 * Disable ASLR for the spawned process.
3653 *
3654 * But only do so if we are not embedded + RELEASE.
3655 * While embedded allows for a boot-arg (-disable_aslr)
3656 * to deal with this (which itself is only honored on
3657 * DEVELOPMENT or DEBUG builds of xnu), it is often
3658 * useful or necessary to disable ASLR on a per-process
3659 * basis for unit testing and debugging.
3660 */
3661 if (px_sa.psa_flags & _POSIX_SPAWN_DISABLE_ASLR) {
3662 OSBitOrAtomic(P_DISABLE_ASLR, &p->p_flag);
3663 }
3664#endif /* !SECURE_KERNEL */
3665
3666 /* Randomize high bits of ASLR slide */
3667 if (px_sa.psa_flags & _POSIX_SPAWN_HIGH_BITS_ASLR) {
3668 imgp->ip_flags |= IMGPF_HIGH_BITS_ASLR;
3669 }
3670
3671#if !SECURE_KERNEL
3672 /*
3673	 * Forcibly allow execution from data pages for the spawned process
3674	 * even if it would otherwise be disallowed by the architecture default.
3675 */
3676 if (px_sa.psa_flags & _POSIX_SPAWN_ALLOW_DATA_EXEC) {
3677 imgp->ip_flags |= IMGPF_ALLOW_DATA_EXEC;
3678 }
3679#endif /* !SECURE_KERNEL */
3680
3681#if __has_feature(ptrauth_calls)
3682 if (vm_shared_region_reslide_aslr && is_64 && (px_sa.psa_flags & _POSIX_SPAWN_RESLIDE)) {
3683 imgp->ip_flags |= IMGPF_RESLIDE;
3684 }
3685#endif /* __has_feature(ptrauth_calls) */
3686
3687 if ((px_sa.psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) ==
3688 POSIX_SPAWN_PROC_TYPE_DRIVER) {
3689 imgp->ip_flags |= IMGPF_DRIVER;
3690 }
3691 }
3692
3693 /*
3694 * Disable ASLR during image activation. This occurs either if the
3695 * _POSIX_SPAWN_DISABLE_ASLR attribute was found above or if
3696 * P_DISABLE_ASLR was inherited from the parent process.
3697 */
3698 if (p->p_flag & P_DISABLE_ASLR) {
3699 imgp->ip_flags |= IMGPF_DISABLE_ASLR;
3700 }
3701
3702 /*
3703 * Clear transition flag so we won't hang if exec_activate_image() causes
3704 * an automount (and launchd does a proc sysctl to service it).
3705 *
3706 * <rdar://problem/6848672>, <rdar://problem/5959568>.
3707 */
3708 if (spawn_no_exec) {
3709 proc_transend(p, 0);
3710 proc_transit_set = 0;
3711 }
3712
3713#if MAC_SPAWN /* XXX */
3714 if (uap->mac_p != USER_ADDR_NULL) {
3715 error = mac_execve_enter(uap->mac_p, imgp);
3716 if (error) {
3717 goto bad;
3718 }
3719 }
3720#endif
3721
3722 /*
3723 * Activate the image
3724 */
3725 error = exec_activate_image(imgp);
3726#if defined(HAS_APPLE_PAC)
3727 ml_task_set_jop_pid_from_shared_region(new_task);
3728 ml_task_set_disable_user_jop(new_task, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
3729 ml_thread_set_disable_user_jop(imgp->ip_new_thread, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
3730 ml_thread_set_jop_pid(imgp->ip_new_thread, new_task);
3731#endif
3732
3733 if (error == 0 && !spawn_no_exec) {
3734 p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread, &inherit);
3735 /* proc ref returned */
3736 should_release_proc_ref = TRUE;
3737 }
3738
3739 if (error == 0) {
3740 /* process completed the exec */
3741 exec_done = TRUE;
3742 } else if (error == -1) {
3743 /* Image not claimed by any activator? */
3744 error = ENOEXEC;
3745 }
3746
3747 if (!error && imgp->ip_px_sa != NULL) {
3748 thread_t child_thread = imgp->ip_new_thread;
3749 uthread_t child_uthread = get_bsdthread_info(child_thread);
3750
3751 /*
3752 * Because of POSIX_SPAWN_SETEXEC, we need to handle this after image
3753	 * activation, else a failed image activation (before the point of no
3754	 * return) would leave the parent process in a modified state.
3755 */
3756 if (px_sa.psa_flags & POSIX_SPAWN_SETPGROUP) {
3757 struct setpgid_args spga;
3758 spga.pid = p->p_pid;
3759 spga.pgid = px_sa.psa_pgroup;
3760 /*
3761	 * Effectively call the setpgid() system call; this works
3762	 * because there are no pointer arguments.
3763 */
3764 if ((error = setpgid(p, &spga, ival)) != 0) {
3765 goto bad;
3766 }
3767 }
3768
3769 if (px_sa.psa_flags & POSIX_SPAWN_SETSID) {
3770 error = setsid_internal(p);
3771 if (error != 0) {
3772 goto bad;
3773 }
3774 }
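		/*
		 * Illustrative sketch (userland, not part of this file): the
		 * process-group and session attributes handled above map onto
		 * the attribute API; POSIX_SPAWN_SETSID is an Apple extension
		 * declared in <spawn.h>.
		 *
		 *	posix_spawnattr_t attr;
		 *	posix_spawnattr_init(&attr);
		 *	posix_spawnattr_setpgroup(&attr, 0);	// child gets its own pgroup
		 *	posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETPGROUP);
		 *	// or POSIX_SPAWN_SETSID to start a new session instead
		 */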
3775
3776 /*
3777 * If we have a spawn attr, and it contains signal related flags,
3778	 * then we need to process them in the "context" of the new child
3779 * process, so we have to process it following image activation,
3780 * prior to making the thread runnable in user space. This is
3781 * necessitated by some signal information being per-thread rather
3782 * than per-process, and we don't have the new allocation in hand
3783 * until after the image is activated.
3784 */
3785
3786 /*
3787	 * Mask a list of signals, rather than leaving them unmasked as
3788	 * they were in the parent; note that some signals
3789	 * are not maskable.
3790 */
3791 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGMASK) {
3792 child_uthread->uu_sigmask = (px_sa.psa_sigmask & ~sigcantmask);
3793 }
3794 /*
3795	 * Reset a list of signals to the default action, rather than
3796	 * ignoring them as the parent did. Note that we pass
3797 * spawn_no_exec to setsigvec() to indicate that we called
3798 * fork1() and therefore do not need to call proc_signalstart()
3799 * internally.
3800 */
3801 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGDEF) {
3802 vec.sa_handler = SIG_DFL;
3803 vec.sa_tramp = 0;
3804 vec.sa_mask = 0;
3805 vec.sa_flags = 0;
3806 for (sig = 1; sig < NSIG; sig++) {
3807 if (px_sa.psa_sigdefault & (1 << (sig - 1))) {
3808 error = setsigvec(p, child_thread, sig, &vec, spawn_no_exec);
3809 }
3810 }
3811 }
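		/*
		 * Illustrative sketch (userland, not part of this file): the
		 * signal attributes consumed above are populated with the
		 * standard sigset API. Error handling elided.
		 *
		 *	sigset_t mask, def;
		 *	sigemptyset(&mask);
		 *	sigaddset(&mask, SIGUSR1);	// blocked in the child
		 *	sigemptyset(&def);
		 *	sigaddset(&def, SIGINT);	// reset to SIG_DFL in the child
		 *	posix_spawnattr_setsigmask(&attr, &mask);
		 *	posix_spawnattr_setsigdefault(&attr, &def);
		 *	posix_spawnattr_setflags(&attr,
		 *	    POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF);
		 */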
3812
3813 /*
3814 * Activate the CPU usage monitor, if requested. This is done via a task-wide, per-thread CPU
3815 * usage limit, which will generate a resource exceeded exception if any one thread exceeds the
3816 * limit.
3817 *
3818 * Userland gives us interval in seconds, and the kernel SPI expects nanoseconds.
3819 */
3820 if ((px_sa.psa_cpumonitor_percent != 0) && (px_sa.psa_cpumonitor_percent < UINT8_MAX)) {
3821 /*
3822 * Always treat a CPU monitor activation coming from spawn as entitled. Requiring
3823 * an entitlement to configure the monitor a certain way seems silly, since
3824	 * whoever is turning it on could just as easily choose not to do so.
3825 */
3826 error = proc_set_task_ruse_cpu(p->task,
3827 TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
3828 (uint8_t)px_sa.psa_cpumonitor_percent,
3829 px_sa.psa_cpumonitor_interval * NSEC_PER_SEC,
3830 0, TRUE);
3831 }
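		/*
		 * Illustrative sketch (userland, not part of this file),
		 * assuming the private posix_spawnattr_setcpumonitor() SPI
		 * with (attr, percent, interval-in-seconds) arguments: request
		 * the behavior configured above, e.g. a resource-exceeded
		 * exception if any thread sustains more than 50% CPU over a
		 * 120 second window.
		 *
		 *	posix_spawnattr_setcpumonitor(&attr, 50, 120);
		 *
		 * The kernel converts the interval to nanoseconds before
		 * handing it to proc_set_task_ruse_cpu(), per the comment above.
		 */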
3832
3833
3834 if (px_pcred_info &&
3835 (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_LOGIN)) {
3836 /*
3837 * setlogin() must happen after setsid()
3838 */
3839 setlogin_internal(p, px_pcred_info->pspci_login);
3840 }
3841 }
3842
3843bad:
3844
3845 if (error == 0) {
3846 /* reset delay idle sleep status if set */
3847#if CONFIG_DELAY_IDLE_SLEEP
3848 if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) {
3849 OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &p->p_flag);
3850 }
3851#endif /* CONFIG_DELAY_IDLE_SLEEP */
3852 /* upon successful spawn, re/set the proc control state */
3853 if (imgp->ip_px_sa != NULL) {
3854 switch (px_sa.psa_pcontrol) {
3855 case POSIX_SPAWN_PCONTROL_THROTTLE:
3856 p->p_pcaction = P_PCTHROTTLE;
3857 break;
3858 case POSIX_SPAWN_PCONTROL_SUSPEND:
3859 p->p_pcaction = P_PCSUSP;
3860 break;
3861 case POSIX_SPAWN_PCONTROL_KILL:
3862 p->p_pcaction = P_PCKILL;
3863 break;
3864 case POSIX_SPAWN_PCONTROL_NONE:
3865 default:
3866 p->p_pcaction = 0;
3867 break;
3868 }
3870 }
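	/*
	 * Illustrative sketch (userland, not part of this file): the process
	 * control action consumed above is set with the posix_spawn API,
	 * assuming the posix_spawnattr_setpcontrol_np() declaration in
	 * <spawn.h>. Under memory pressure the child would then be throttled
	 * rather than left untouched.
	 *
	 *	posix_spawnattr_t attr;
	 *	posix_spawnattr_init(&attr);
	 *	posix_spawnattr_setpcontrol_np(&attr, POSIX_SPAWN_PCONTROL_THROTTLE);
	 *	// ... pass &attr to posix_spawn() as in the sketches above ...
	 */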
3871 exec_resettextvp(p, imgp);
3872
3873#if CONFIG_MEMORYSTATUS
3874 /* Set jetsam priority for DriverKit processes */
3875 if (px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DRIVER) {
3876 px_sa.psa_priority = JETSAM_PRIORITY_DRIVER_APPLE;
3877 }
3878
3879 /* Has jetsam attributes? */
3880 if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_SET)) {
3881 /*
3882 * With 2-level high-water-mark support, POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is no
3883 * longer relevant, as background limits are described via the inactive limit slots.
3884 *
3885 * That said, however, if the POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is passed in,
3886 * we attempt to mimic previous behavior by forcing the BG limit data into the
3887	 * inactive/non-fatal mode and forcing the active slots to hold system_wide/fatal mode.
3888 */
3889
3890 if (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND) {
3891 memorystatus_update(p, px_sa.psa_priority, 0, FALSE, /* assertion priority */
3892 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY),
3893 TRUE,
3894 -1, TRUE,
3895 px_sa.psa_memlimit_inactive, FALSE);
3896 } else {
3897 memorystatus_update(p, px_sa.psa_priority, 0, FALSE, /* assertion priority */
3898 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY),
3899 TRUE,
3900 px_sa.psa_memlimit_active,
3901 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL),
3902 px_sa.psa_memlimit_inactive,
3903 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL));
3904 }
3905 }
3906
3907 /* Has jetsam relaunch behavior? */
3908 if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MASK)) {
3909 /*
3910 * Launchd has passed in data indicating the behavior of this process in response to jetsam.
3911 * This data would be used by the jetsam subsystem to determine the position and protection
3912 * offered to this process on dirty -> clean transitions.
3913 */
3914 int relaunch_flags = P_MEMSTAT_RELAUNCH_UNKNOWN;
3915 switch (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MASK) {
3916 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_LOW:
3917 relaunch_flags = P_MEMSTAT_RELAUNCH_LOW;
3918 break;
3919 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MED:
3920 relaunch_flags = P_MEMSTAT_RELAUNCH_MED;
3921 break;
3922 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_HIGH:
3923 relaunch_flags = P_MEMSTAT_RELAUNCH_HIGH;
3924 break;
3925 default:
3926 break;
3927 }
3928 memorystatus_relaunch_flags_update(p, relaunch_flags);
3929 }
3930
3931#endif /* CONFIG_MEMORYSTATUS */
3932 if (imgp->ip_px_sa != NULL && px_sa.psa_thread_limit > 0) {
3933 task_set_thread_limit(new_task, (uint16_t)px_sa.psa_thread_limit);
3934 }
3935
3936 /* Disable wakeup monitoring for DriverKit processes */
3937 if (px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DRIVER) {
3938 uint32_t flags = WAKEMON_DISABLE;
3939 task_wakeups_monitor_ctl(new_task, &flags, NULL);
3940 }
3941 }
3942
3943 /*
3944 * If we successfully called fork1(), we always need to do this;
3945 * we identify this case by noting the IMGPF_SPAWN flag. This is
3946 * because we come back from that call with signals blocked in the
3947 * child, and we have to unblock them, but we want to wait until
3948 * after we've performed any spawn actions. This has to happen
3949 * before check_for_signature(), which uses psignal.
3950 */
3951 if (spawn_no_exec) {
3952 if (proc_transit_set) {
3953 proc_transend(p, 0);
3954 }
3955
3956 /*
3957 * Drop the signal lock on the child which was taken on our
3958 * behalf by forkproc()/cloneproc() to prevent signals being
3959 * received by the child in a partially constructed state.
3960 */
3961 proc_signalend(p, 0);
3962 }
3963
3964 if (error == 0) {
3965 /*
3966 * We need to initialize the bank context behind the protection of
3967 * the proc_trans lock to prevent a race with exit. We can't do this during
3968 * exec_activate_image because task_bank_init checks entitlements that
3969 * aren't loaded until subsequent calls (including exec_resettextvp).
3970 */
3971 error = proc_transstart(p, 0, 0);
3972
3973 if (error == 0) {
3974 task_bank_init(new_task);
3975 proc_transend(p, 0);
3976 }
3977
3978#if __arm64__
3979 proc_footprint_entitlement_hacks(p, new_task);
3980#endif /* __arm64__ */
3981
3982#if __has_feature(ptrauth_calls)
3983 task_set_pac_exception_fatal_flag(new_task);
3984#endif /* __has_feature(ptrauth_calls) */
3985 }
3986
3987 /* Inherit task role from old task to new task for exec */
3988 if (error == 0 && !spawn_no_exec) {
3989 proc_inherit_task_role(new_task, old_task);
3990 }
3991
3992#if CONFIG_ARCADE
3993 if (error == 0) {
3994 /*
3995 * Check to see if we need to trigger an arcade upcall AST now
3996 * that the vnode has been reset on the task.
3997 */
3998 arcade_prepare(new_task, imgp->ip_new_thread);
3999 }
4000#endif /* CONFIG_ARCADE */
4001
4002 /* Clear the initial wait on the thread before handling spawn policy */
4003 if (imgp && imgp->ip_new_thread) {
4004 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_INITIAL_WAIT);
4005 }
4006
4007 /*
4008 * Apply the spawnattr policy, apptype (which primes the task for importance donation),
4009 * and bind any portwatch ports to the new task.
4010 * This must be done after the exec so that the child's thread is ready,
4011 * and after the in transit state has been released, because priority is
4012	 * dropped here, so we need to be prepared for a potentially long preemption interval.
4013 *
4014 * TODO: Consider splitting this up into separate phases
4015 */
4016 if (error == 0 && imgp->ip_px_sa != NULL) {
4017 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
4018
4019 error = exec_handle_spawnattr_policy(p, imgp->ip_new_thread, psa->psa_apptype, psa->psa_qos_clamp,
4020 psa->psa_darwin_role, &port_actions);
4021 }
4022
4023 /* Transfer the turnstile watchport boost to new task if in exec */
4024 if (error == 0 && !spawn_no_exec) {
4025 task_transfer_turnstile_watchports(old_task, new_task, imgp->ip_new_thread);
4026 }
4027
4028 /*
4029 * Apply the requested maximum address.
4030 */
4031 if (error == 0 && imgp->ip_px_sa != NULL) {
4032 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
4033
4034 if (psa->psa_max_addr) {
4035 vm_map_set_max_addr(get_task_map(new_task), (vm_map_offset_t)psa->psa_max_addr);
4036 }
4037 }
4038
4039 if (error == 0 && imgp->ip_px_sa != NULL) {
4040 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
4041
4042 if (psa->psa_no_smt) {
4043 task_set_no_smt(new_task);
4044 }
4045 if (psa->psa_tecs) {
4046 task_set_tecs(new_task);
4047 }
4048 }
4049
4050 if (error == 0) {
4051 /* Apply the main thread qos */
4052 thread_t main_thread = imgp->ip_new_thread;
4053 task_set_main_thread_qos(new_task, main_thread);
4054
4055#if CONFIG_MACF
4056 proc_apply_jit_and_jumbo_va_policies(p, new_task);
4057#endif /* CONFIG_MACF */
4058 }
4059
4060 /*
4061 * Release any ports we kept around for binding to the new task
4062 * We need to release the rights even if the posix_spawn has failed.
4063 */
4064 if (imgp->ip_px_spa != NULL) {
4065 exec_port_actions_destroy(&port_actions);
4066 }
4067
4068 /*
4069 * We have to delay operations which might throw a signal until after
4070 * the signals have been unblocked; however, we want that to happen
4071 * after exec_resettextvp() so that the textvp is correct when they
4072 * fire.
4073 */
4074 if (error == 0) {
4075 error = check_for_signature(p, imgp);
4076
4077 /*
4078 * Pay for our earlier safety; deliver the delayed signals from
4079 * the incomplete spawn process now that it's complete.
4080 */
4081 if (imgp != NULL && spawn_no_exec && (p->p_lflag & P_LTRACED)) {
4082 psignal_vfork(p, p->task, imgp->ip_new_thread, SIGTRAP);
4083 }
4084
4085 if (error == 0 && !spawn_no_exec) {
4086 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
4087 p->p_pid);
4088 }
4089 }
4090
4091 if (spawn_no_exec) {
4092 /* flag the 'fork' has occurred */
4093 proc_knote(p->p_pptr, NOTE_FORK | p->p_pid);
4094 }
4095
4096 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
4097 if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
4098 proc_knote(p, NOTE_EXEC);
4099 }
4100
4101 if (imgp != NULL) {
4102 if (imgp->ip_vp) {
4103 vnode_put(imgp->ip_vp);
4104 }
4105 if (imgp->ip_scriptvp) {
4106 vnode_put(imgp->ip_scriptvp);
4107 }
4108 if (imgp->ip_strings) {
4109 execargs_free(imgp);
4110 }
4111 kheap_free(KHEAP_TEMP, imgp->ip_px_sfa,
4112 px_args.file_actions_size);
4113 kheap_free(KHEAP_TEMP, imgp->ip_px_spa,
4114 px_args.port_actions_size);
4115#if CONFIG_PERSONAS
4116 kheap_free(KHEAP_TEMP, imgp->ip_px_persona,
4117 px_args.persona_info_size);
4118#endif
4119 kheap_free(KHEAP_TEMP, imgp->ip_px_pcred_info,
4120 px_args.posix_cred_info_size);
4121
4122 if (subsystem_root_path != NULL) {
4123 zfree(ZV_NAMEI, subsystem_root_path);
4124 }
4125#if CONFIG_MACF
4126 _posix_spawn_mac_policy_extensions_t psmx = imgp->ip_px_smpx;
4127 if (psmx) {
4128 spawn_free_macpolicyinfo(&px_args,
4129 psmx, psmx->psmx_count);
4130 }
4131 if (imgp->ip_execlabelp) {
4132 mac_cred_label_free(imgp->ip_execlabelp);
4133 }
4134 if (imgp->ip_scriptlabelp) {
4135 mac_vnode_label_free(imgp->ip_scriptlabelp);
4136 }
4137 if (imgp->ip_cs_error != OS_REASON_NULL) {
4138 os_reason_free(imgp->ip_cs_error);
4139 imgp->ip_cs_error = OS_REASON_NULL;
4140 }
4141 if (imgp->ip_inherited_shared_region_id != NULL) {
4142 kheap_free(KHEAP_DATA_BUFFERS, imgp->ip_inherited_shared_region_id,
4143 strlen(imgp->ip_inherited_shared_region_id) + 1);
4144 imgp->ip_inherited_shared_region_id = NULL;
4145 }
4146#endif
4147 if (imgp->ip_sc_port != NULL) {
4148 ipc_port_release_send(imgp->ip_sc_port);
4149 imgp->ip_sc_port = NULL;
4150 }
4151 }
4152
4153#if CONFIG_DTRACE
4154 if (spawn_no_exec) {
4155 /*
4156 * In the original DTrace reference implementation,
4157 * posix_spawn() was a libc routine that just
4158 * did vfork(2) then exec(2). Thus the proc::: probes
4159 * are very fork/exec oriented. The details of this
4160	 * in-kernel implementation of posix_spawn() are different
4161 * (while producing the same process-observable effects)
4162 * particularly w.r.t. errors, and which thread/process
4163 * is constructing what on behalf of whom.
4164 */
4165 if (error) {
4166 DTRACE_PROC1(spawn__failure, int, error);
4167 } else {
4168 DTRACE_PROC(spawn__success);
4169 /*
4170 * Some DTrace scripts, e.g. newproc.d in
4171	 * /usr/bin, rely on the 'exec-success'
4172 * probe being fired in the child after the
4173 * new process image has been constructed
4174 * in order to determine the associated pid.
4175 *
4176 * So, even though the parent built the image
4177 * here, for compatibility, mark the new thread
4178 * so 'exec-success' fires on it as it leaves
4179 * the kernel.
4180 */
4181 dtrace_thread_didexec(imgp->ip_new_thread);
4182 }
4183 } else {
4184 if (error) {
4185 DTRACE_PROC1(exec__failure, int, error);
4186 } else {
4187 dtrace_thread_didexec(imgp->ip_new_thread);
4188 }
4189 }
4190
4191 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
4192 (*dtrace_proc_waitfor_hook)(p);
4193 }
4194#endif
4195
4196#if CONFIG_AUDIT
4197 if (!error && AUDIT_ENABLED() && p) {
4198 /* Add the CDHash of the new process to the audit record */
4199 uint8_t *cdhash = cs_get_cdhash(p);
4200 if (cdhash) {
4201 AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN);
4202 }
4203 }
4204#endif
4205
4206 /*
4207 * clear bsd_info from old task if it did exec.
4208 */
4209 if (task_did_exec(old_task)) {
4210 set_bsdtask_info(old_task, NULL);
4211 }
4212
4213 /* clear bsd_info from new task and terminate it if exec failed */
4214 if (new_task != NULL && task_is_exec_copy(new_task)) {
4215 set_bsdtask_info(new_task, NULL);
4216 task_terminate_internal(new_task);
4217 }
4218
4219 /* Return to both the parent and the child? */
4220 if (imgp != NULL && spawn_no_exec) {
4221 /*
4222 * If the parent wants the pid, copy it out
4223 */
4224 if (pid != USER_ADDR_NULL) {
4225 _Static_assert(sizeof(p->p_pid) == 4, "posix_spawn() assumes a 32-bit pid_t");
4226 bool aligned = (pid & 3) == 0;
4227 if (aligned) {
4228 (void)copyout_atomic32(p->p_pid, pid);
4229 } else {
4230 (void)suword(pid, p->p_pid);
4231 }
4232 }
4233 retval[0] = error;
4234
4235 /*
4236	 * If we had an error, perform an internal reap; this is
4237 * entirely safe, as we have a real process backing us.
4238 */
4239 if (error) {
4240 proc_list_lock();
4241 p->p_listflag |= P_LIST_DEADPARENT;
4242 proc_list_unlock();
4243 proc_lock(p);
4244 /* make sure no one else has killed it off... */
4245 if (p->p_stat != SZOMB && p->exit_thread == NULL) {
4246 p->exit_thread = current_thread();
4247 proc_unlock(p);
4248 exit1(p, 1, (int *)NULL);
4249 } else {
4250 /* someone is doing it for us; just skip it */
4251 proc_unlock(p);
4252 }
4253 }
4254 }
4255
4256 /*
4257	 * Do not terminate the current task if proc_exec_switch_task did not
4258	 * switch the tasks; terminating the current task without the switch would
4259	 * result in losing the SIGKILL status.
4260 */
4261 if (task_did_exec(old_task)) {
4262 /* Terminate the current task, since exec will start in new task */
4263 task_terminate_internal(old_task);
4264 }
4265
4266 /* Release the thread ref returned by fork_create_child/fork1 */
4267 if (imgp != NULL && imgp->ip_new_thread) {
4268 /* wake up the new thread */
4269 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_FINAL_WAIT);
4270 thread_deallocate(imgp->ip_new_thread);
4271 imgp->ip_new_thread = NULL;
4272 }
4273
4274 /* Release the ref returned by fork_create_child/fork1 */
4275 if (new_task) {
4276 task_deallocate(new_task);
4277 new_task = NULL;
4278 }
4279
4280 if (should_release_proc_ref) {
4281 proc_rele(p);
4282 }
4283
4284 kheap_free(KHEAP_TEMP, bufp,
4285 sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap));
4286
4287 if (inherit != NULL) {
4288 ipc_importance_release(inherit);
4289 }
4290
4291 return error;
4292}
4293
4294/*
4295 * proc_exec_switch_task
4296 *
4297 * Parameters: p proc
4298 * old_task task before exec
4299 * new_task task after exec
4300 * new_thread thread in new task
4301 * inherit resulting importance linkage
4302 *
4303 * Returns: proc.
4304 *
4305 * Note: The function will switch the task pointer of proc
4306 * from old task to new task. The switch needs to happen
4307 * after draining all proc refs and inside a proc translock.
4308 * In the case of failure to switch the task, which might happen
4309 * if the process received a SIGKILL or jetsam killed it, it will make
4310	 *		sure that the new task terminates. User proc ref returned
4311 * to caller.
4312 *
4313	 *		This function is called after the point of no return; in the
4314	 *		case of a failure to switch, it will terminate the new task,
4315	 *		swallow the error, and let the terminated process complete exec and die.
4316 */
4317proc_t
4318proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread,
4319 void **inherit)
4320{
4321 int error = 0;
4322 boolean_t task_active;
4323 boolean_t proc_active;
4324 boolean_t thread_active;
4325 thread_t old_thread = current_thread();
4326
4327 /*
4328 * Switch the task pointer of proc to new task.
4329 * Before switching the task, wait for proc_refdrain.
4330	 * After the switch happens, the proc can disappear,
4331	 * so take a ref before it does. Waiting for
4332	 * proc_refdrain in exec will block all other threads
4333	 * trying to take a proc ref, so boost the current thread
4334	 * to avoid priority inversion.
4335 */
4336 thread_set_exec_promotion(old_thread);
4337 p = proc_refdrain_with_refwait(p, TRUE);
4338 /* extra proc ref returned to the caller */
4339
4340 assert(get_threadtask(new_thread) == new_task);
4341 task_active = task_is_active(new_task);
4342
4343 /* Take the proc_translock to change the task ptr */
4344 proc_lock(p);
4345 proc_active = !(p->p_lflag & P_LEXIT);
4346
4347 /* Check if the current thread is not aborted due to SIGKILL */
4348 thread_active = thread_is_active(old_thread);
4349
4350 /*
4351 * Do not switch the task if the new task or proc is already terminated
4352 * as a result of error in exec past point of no return
4353 */
4354 if (proc_active && task_active && thread_active) {
4355 error = proc_transstart(p, 1, 0);
4356 if (error == 0) {
4357 uthread_t new_uthread = get_bsdthread_info(new_thread);
4358 uthread_t old_uthread = get_bsdthread_info(current_thread());
4359
4360 /*
4361 * bsd_info of old_task will get cleared in execve and posix_spawn
4362 * after firing exec-success/error dtrace probe.
4363 */
4364 p->task = new_task;
4365
4366 /* Clear dispatchqueue and workloop ast offset */
4367 p->p_dispatchqueue_offset = 0;
4368 p->p_dispatchqueue_serialno_offset = 0;
4369 p->p_dispatchqueue_label_offset = 0;
4370 p->p_return_to_kernel_offset = 0;
4371
4372 /* Copy the signal state, dtrace state and set bsd ast on new thread */
4373 act_set_astbsd(new_thread);
4374 new_uthread->uu_siglist = old_uthread->uu_siglist;
4375 new_uthread->uu_sigwait = old_uthread->uu_sigwait;
4376 new_uthread->uu_sigmask = old_uthread->uu_sigmask;
4377 new_uthread->uu_oldmask = old_uthread->uu_oldmask;
4378 new_uthread->uu_vforkmask = old_uthread->uu_vforkmask;
4379 new_uthread->uu_exit_reason = old_uthread->uu_exit_reason;
4380#if CONFIG_DTRACE
4381 new_uthread->t_dtrace_sig = old_uthread->t_dtrace_sig;
4382 new_uthread->t_dtrace_stop = old_uthread->t_dtrace_stop;
4383 new_uthread->t_dtrace_resumepid = old_uthread->t_dtrace_resumepid;
4384 assert(new_uthread->t_dtrace_scratch == NULL);
4385 new_uthread->t_dtrace_scratch = old_uthread->t_dtrace_scratch;
4386
4387 old_uthread->t_dtrace_sig = 0;
4388 old_uthread->t_dtrace_stop = 0;
4389 old_uthread->t_dtrace_resumepid = 0;
4390 old_uthread->t_dtrace_scratch = NULL;
4391#endif
4392 /* Copy the resource accounting info */
4393 thread_copy_resource_info(new_thread, current_thread());
4394
4395 /* Clear the exit reason and signal state on old thread */
4396 old_uthread->uu_exit_reason = NULL;
4397 old_uthread->uu_siglist = 0;
4398
4399 /* Add the new uthread to proc uthlist and remove the old one */
4400 TAILQ_INSERT_TAIL(&p->p_uthlist, new_uthread, uu_list);
4401 TAILQ_REMOVE(&p->p_uthlist, old_uthread, uu_list);
4402
4403 task_set_did_exec_flag(old_task);
4404 task_clear_exec_copy_flag(new_task);
4405
4406 task_copy_fields_for_exec(new_task, old_task);
4407
4408 /* Transfer sandbox filter bits to new_task. */
4409 task_transfer_mach_filter_bits(new_task, old_task);
4410
4411 /*
4412 * Need to transfer pending watch port boosts to the new task
4413 * while still making sure that the old task remains in the
4414 * importance linkage. Create an importance linkage from old task
4415 * to new task, then switch the task importance base of old task
4416 * and new task. After the switch the port watch boost will be
4417 * boosting the new task and new task will be donating importance
4418 * to old task.
4419 */
4420 *inherit = ipc_importance_exec_switch_task(old_task, new_task);
4421
4422 proc_transend(p, 1);
4423 }
4424 }
4425
4426 proc_unlock(p);
4427 proc_refwake(p);
4428 thread_clear_exec_promotion(old_thread);
4429
4430 if (error != 0 || !task_active || !proc_active || !thread_active) {
4431 task_terminate_internal(new_task);
4432 }
4433
4434 return p;
4435}
4436
4437/*
4438 * execve
4439 *
4440 * Parameters: uap->fname File name to exec
4441 * uap->argp Argument list
4442 * uap->envp Environment list
4443 *
4444 * Returns: 0 Success
4445 * __mac_execve:EINVAL Invalid argument
4446 * __mac_execve:ENOTSUP	Not supported
4447 * __mac_execve:EACCES Permission denied
4448 * __mac_execve:EINTR Interrupted function
4449 * __mac_execve:ENOMEM Not enough space
4450 * __mac_execve:EFAULT Bad address
4451 * __mac_execve:ENAMETOOLONG Filename too long
4452 * __mac_execve:ENOEXEC Executable file format error
4453 * __mac_execve:ETXTBSY Text file busy [misuse of error code]
4454 * __mac_execve:???
4455 *
4456 * TODO: Dynamic linker header address on stack is copied via suword()
4457 */
4458/* ARGSUSED */
4459int
4460execve(proc_t p, struct execve_args *uap, int32_t *retval)
4461{
4462 struct __mac_execve_args muap;
4463 int err;
4464
4465 memoryshot(VM_EXECVE, DBG_FUNC_NONE);
4466
4467 muap.fname = uap->fname;
4468 muap.argp = uap->argp;
4469 muap.envp = uap->envp;
4470 muap.mac_p = USER_ADDR_NULL;
4471 err = __mac_execve(p, &muap, retval);
4472
4473 return err;
4474}
4475
4476/*
4477 * __mac_execve
4478 *
4479 * Parameters: uap->fname File name to exec
4480 * uap->argp Argument list
4481 * uap->envp Environment list
4482 * uap->mac_p MAC label supplied by caller
4483 *
4484 * Returns: 0 Success
4485 * EINVAL Invalid argument
4486 * ENOTSUP Not supported
4487 * ENOEXEC Executable file format error
4488 * exec_activate_image:EINVAL Invalid argument
4489 * exec_activate_image:EACCES Permission denied
4490 * exec_activate_image:EINTR Interrupted function
4491 * exec_activate_image:ENOMEM Not enough space
4492 * exec_activate_image:EFAULT Bad address
4493 * exec_activate_image:ENAMETOOLONG Filename too long
4494 * exec_activate_image:ENOEXEC Executable file format error
4495 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
4496 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
4497 * exec_activate_image:???
4498 * mac_execve_enter:???
4499 *
4500 * TODO: Dynamic linker header address on stack is copied via suword()
4501 */
4502int
4503__mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval)
4504{
4505 char *bufp = NULL;
4506 struct image_params *imgp;
4507 struct vnode_attr *vap;
4508 struct vnode_attr *origvap;
4509 int error;
4510 int is_64 = IS_64BIT_PROCESS(p);
4511 struct vfs_context context;
4512 struct uthread *uthread;
4513 task_t old_task = current_task();
4514 task_t new_task = NULL;
4515 boolean_t should_release_proc_ref = FALSE;
4516 boolean_t exec_done = FALSE;
4517 boolean_t in_vfexec = FALSE;
4518 void *inherit = NULL;
4519
4520 context.vc_thread = current_thread();
4521 context.vc_ucred = kauth_cred_proc_ref(p); /* XXX must NOT be kauth_cred_get() */
4522
4523	/* Allocate a big chunk for locals instead of using the stack, since these
4524	 * structures are pretty big.
4525 */
4526 bufp = kheap_alloc(KHEAP_TEMP,
4527 sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap), Z_WAITOK | Z_ZERO);
4528 imgp = (struct image_params *) bufp;
4529 if (bufp == NULL) {
4530 error = ENOMEM;
4531 goto exit_with_error;
4532 }
4533 vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
4534 origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));
4535
4536 /* Initialize the common data in the image_params structure */
4537 imgp->ip_user_fname = uap->fname;
4538 imgp->ip_user_argv = uap->argp;
4539 imgp->ip_user_envv = uap->envp;
4540 imgp->ip_vattr = vap;
4541 imgp->ip_origvattr = origvap;
4542 imgp->ip_vfs_context = &context;
4543 imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE) | ((p->p_flag & P_DISABLE_ASLR) ? IMGPF_DISABLE_ASLR : IMGPF_NONE);
4544 imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
4545 imgp->ip_mac_return = 0;
4546 imgp->ip_cs_error = OS_REASON_NULL;
4547 imgp->ip_simulator_binary = IMGPF_SB_DEFAULT;
4548 imgp->ip_subsystem_root_path = NULL;
4549
4550#if CONFIG_MACF
4551 if (uap->mac_p != USER_ADDR_NULL) {
4552 error = mac_execve_enter(uap->mac_p, imgp);
4553 if (error) {
4554 kauth_cred_unref(&context.vc_ucred);
4555 goto exit_with_error;
4556 }
4557 }
4558#endif
4559 uthread = get_bsdthread_info(current_thread());
4560 if (uthread->uu_flag & UT_VFORK) {
4561 imgp->ip_flags |= IMGPF_VFORK_EXEC;
4562 in_vfexec = TRUE;
4563 } else {
4564 imgp->ip_flags |= IMGPF_EXEC;
4565
4566 /*
4567 * For execve case, create a new task and thread
4568 * which points to current_proc. The current_proc will point
4569 * to the new task after image activation and proc ref drain.
4570 *
4571 * proc (current_proc) <----- old_task (current_task)
4572 * ^ | ^
4573 * | | |
4574 * | ----------------------------------
4575 * |
4576 * --------- new_task (task marked as TF_EXEC_COPY)
4577 *
4578 * After image activation, the proc will point to the new task
4579 * and would look like following.
4580 *
4581 * proc (current_proc) <----- old_task (current_task, marked as TPF_DID_EXEC)
4582 * ^ |
4583 * | |
4584 * | ----------> new_task
4585 * | |
4586 * -----------------
4587 *
4588 * During exec any transition from new_task -> proc is fine, but don't allow
4589 * transition from proc->task, since it will modify old_task.
4590 */
4591 imgp->ip_new_thread = fork_create_child(old_task,
4592 NULL,
4593 p,
4594 FALSE,
4595 p->p_flag & P_LP64,
4596 task_get_64bit_data(old_task),
4597 TRUE);
4598 /* task and thread ref returned by fork_create_child */
4599 if (imgp->ip_new_thread == NULL) {
4600 error = ENOMEM;
4601 goto exit_with_error;
4602 }
4603
4604 new_task = get_threadtask(imgp->ip_new_thread);
4605 context.vc_thread = imgp->ip_new_thread;
4606 }
4607
4608 imgp->ip_subsystem_root_path = p->p_subsystem_root_path;
4609
4610 error = exec_activate_image(imgp);
4611 /* thread and task ref returned for vfexec case */
4612
4613 if (imgp->ip_new_thread != NULL) {
4614 /*
4615 * task reference might be returned by exec_activate_image
4616 * for vfexec.
4617 */
4618 new_task = get_threadtask(imgp->ip_new_thread);
4619#if defined(HAS_APPLE_PAC)
4620 ml_task_set_disable_user_jop(new_task, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
4621 ml_thread_set_disable_user_jop(imgp->ip_new_thread, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
4622#endif
4623 }
4624
4625 if (!error && !in_vfexec) {
4626 p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread, &inherit);
4627 /* proc ref returned */
4628 should_release_proc_ref = TRUE;
4629 }
4630
4631 kauth_cred_unref(&context.vc_ucred);
4632
4633 /* Image not claimed by any activator? */
4634 if (error == -1) {
4635 error = ENOEXEC;
4636 }
4637
4638 if (!error) {
4639 exec_done = TRUE;
4640 assert(imgp->ip_new_thread != NULL);
4641
4642 exec_resettextvp(p, imgp);
4643 error = check_for_signature(p, imgp);
4644 }
4645
4646#if defined(HAS_APPLE_PAC)
4647 if (imgp->ip_new_thread && !error) {
4648 ml_task_set_jop_pid_from_shared_region(new_task);
4649 ml_thread_set_jop_pid(imgp->ip_new_thread, new_task);
4650 }
4651#endif /* defined(HAS_APPLE_PAC) */
4652
4653 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
4654 if (exec_done && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
4655 proc_knote(p, NOTE_EXEC);
4656 }
4657
4658 if (imgp->ip_vp != NULLVP) {
4659 vnode_put(imgp->ip_vp);
4660 }
4661 if (imgp->ip_scriptvp != NULLVP) {
4662 vnode_put(imgp->ip_scriptvp);
4663 }
4664 if (imgp->ip_strings) {
4665 execargs_free(imgp);
4666 }
4667#if CONFIG_MACF
4668 if (imgp->ip_execlabelp) {
4669 mac_cred_label_free(imgp->ip_execlabelp);
4670 }
4671 if (imgp->ip_scriptlabelp) {
4672 mac_vnode_label_free(imgp->ip_scriptlabelp);
4673 }
4674#endif
4675 if (imgp->ip_cs_error != OS_REASON_NULL) {
4676 os_reason_free(imgp->ip_cs_error);
4677 imgp->ip_cs_error = OS_REASON_NULL;
4678 }
4679
4680 if (!error) {
4681 /*
4682 * We need to initialize the bank context behind the protection of
4683 * the proc_trans lock to prevent a race with exit. We can't do this during
4684 * exec_activate_image because task_bank_init checks entitlements that
4685 * aren't loaded until subsequent calls (including exec_resettextvp).
4686 */
4687 error = proc_transstart(p, 0, 0);
4688 }
4689
4690 if (!error) {
4691 task_bank_init(new_task);
4692 proc_transend(p, 0);
4693
4694#if __arm64__
4695 proc_footprint_entitlement_hacks(p, new_task);
4696#endif /* __arm64__ */
4697
4698 /* Sever any extant thread affinity */
4699 thread_affinity_exec(current_thread());
4700
4701 /* Inherit task role from old task to new task for exec */
4702 if (!in_vfexec) {
4703 proc_inherit_task_role(new_task, old_task);
4704 }
4705
4706 thread_t main_thread = imgp->ip_new_thread;
4707
4708 task_set_main_thread_qos(new_task, main_thread);
4709
4710#if __has_feature(ptrauth_calls)
4711 task_set_pac_exception_fatal_flag(new_task);
4712#endif /* __has_feature(ptrauth_calls) */
4713
4714#if CONFIG_ARCADE
4715 /*
4716 * Check to see if we need to trigger an arcade upcall AST now
4717 * that the vnode has been reset on the task.
4718 */
4719 arcade_prepare(new_task, imgp->ip_new_thread);
4720#endif /* CONFIG_ARCADE */
4721
4722#if CONFIG_MACF
4723 proc_apply_jit_and_jumbo_va_policies(p, new_task);
4724#endif /* CONFIG_MACF */
4725
4726 if (vm_darkwake_mode == TRUE) {
4727 /*
4728 * This process is being launched when the system
4729 * is in darkwake. So mark it specially. This will
4730 * cause all its pages to be entered in the background Q.
4731 */
4732 task_set_darkwake_mode(new_task, vm_darkwake_mode);
4733 }
4734
4735#if CONFIG_DTRACE
4736 dtrace_thread_didexec(imgp->ip_new_thread);
4737
4738 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
4739 (*dtrace_proc_waitfor_hook)(p);
4740 }
4741#endif
4742
4743#if CONFIG_AUDIT
4744 if (!error && AUDIT_ENABLED() && p) {
4745 /* Add the CDHash of the new process to the audit record */
4746 uint8_t *cdhash = cs_get_cdhash(p);
4747 if (cdhash) {
4748 AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN);
4749 }
4750 }
4751#endif
4752
4753 if (in_vfexec) {
4754 vfork_return(p, retval, p->p_pid);
4755 }
4756 } else {
4757 DTRACE_PROC1(exec__failure, int, error);
4758 }
4759
4760exit_with_error:
4761
4762 /*
4763 * clear bsd_info from old task if it did exec.
4764 */
4765 if (task_did_exec(old_task)) {
4766 set_bsdtask_info(old_task, NULL);
4767 }
4768
4769 /* clear bsd_info from new task and terminate it if exec failed */
4770 if (new_task != NULL && task_is_exec_copy(new_task)) {
4771 set_bsdtask_info(new_task, NULL);
4772 task_terminate_internal(new_task);
4773 }
4774
4775 if (imgp != NULL) {
4776 /* Clear the initial wait on the thread transferring watchports */
4777 if (imgp->ip_new_thread) {
4778 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_INITIAL_WAIT);
4779 }
4780
4781 /* Transfer the watchport boost to new task */
4782 if (!error && !in_vfexec) {
4783 task_transfer_turnstile_watchports(old_task,
4784 new_task, imgp->ip_new_thread);
4785 }
4786 /*
4787		 * Do not terminate the current task if proc_exec_switch_task did not
4788		 * switch the tasks; terminating the current task without the switch would
4789		 * result in losing the SIGKILL status.
4790 */
4791 if (task_did_exec(old_task)) {
4792 /* Terminate the current task, since exec will start in new task */
4793 task_terminate_internal(old_task);
4794 }
4795
4796 /* Release the thread ref returned by fork_create_child */
4797 if (imgp->ip_new_thread) {
4798 /* wake up the new exec thread */
4799 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_FINAL_WAIT);
4800 thread_deallocate(imgp->ip_new_thread);
4801 imgp->ip_new_thread = NULL;
4802 }
4803 }
4804
4805 /* Release the ref returned by fork_create_child */
4806 if (new_task) {
4807 task_deallocate(new_task);
4808 new_task = NULL;
4809 }
4810
4811 if (should_release_proc_ref) {
4812 proc_rele(p);
4813 }
4814
4815 kheap_free(KHEAP_TEMP, bufp,
4816 sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap));
4817
4818 if (inherit != NULL) {
4819 ipc_importance_release(inherit);
4820 }
4821
4822 return error;
4823}
4824
4825
4826/*
4827 * copyinptr
4828 *
4829 * Description: Copy a pointer in from user space to a user_addr_t in kernel
4830 * space, based on 32/64 bitness of the user space
4831 *
4832 * Parameters: froma User space address
4833 * toptr Address of kernel space user_addr_t
4834 * ptr_size 4/8, based on 'froma' address space
4835 *
4836 * Returns: 0 Success
4837 * EFAULT Bad 'froma'
4838 *
4839 * Implicit returns:
4840 *		*toptr			Modified
4841 */
4842static int
4843copyinptr(user_addr_t froma, user_addr_t *toptr, int ptr_size)
4844{
4845 int error;
4846
4847 if (ptr_size == 4) {
4848		/* 32 bit address zero-extended into a 64 bit user_addr_t */
4849 unsigned int i = 0;
4850
4851 error = copyin(froma, &i, 4);
4852 *toptr = CAST_USER_ADDR_T(i); /* SAFE */
4853 } else {
4854 error = copyin(froma, toptr, 8);
4855 }
4856 return error;
4857}
4858
4859
4860/*
4861 * copyoutptr
4862 *
4863 * Description: Copy a pointer out from a user_addr_t in kernel space to
4864 * user space, based on 32/64 bitness of the user space
4865 *
4866 * Parameters: ua User space address to copy to
4867 * ptr Address of kernel space user_addr_t
4868 * ptr_size 4/8, based on 'ua' address space
4869 *
4870 * Returns: 0 Success
4871 * EFAULT Bad 'ua'
4872 *
4873 */
4874static int
4875copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size)
4876{
4877 int error;
4878
4879 if (ptr_size == 4) {
4880 /* 64 bit value containing 32 bit address */
4881 unsigned int i = CAST_DOWN_EXPLICIT(unsigned int, ua); /* SAFE */
4882
4883 error = copyout(&i, ptr, 4);
4884 } else {
4885 error = copyout(&ua, ptr, 8);
4886 }
4887 return error;
4888}
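/*
 * Illustrative sketch (not part of the call graph): how the pair above
 * behaves for a 32-bit process. A 4-byte user pointer is zero-extended
 * into a user_addr_t on the way in, and truncated on the way out.
 *
 *	user_addr_t uptr = 0;
 *	(void)copyinptr(argv, &uptr, 4);	// uptr = zero-extended 32-bit value
 *	(void)copyoutptr(uptr, argv, 4);	// writes back the low 32 bits
 *
 * For a 64-bit process, ptr_size is 8 and the value is copied verbatim.
 */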
4889
4890
4891/*
4892 * exec_copyout_strings
4893 *
4894 * Copy out the strings segment to user space. The strings segment is put
4895 * on a preinitialized stack frame.
4896 *
4897 * Parameters: struct image_params * the image parameter block
4898 * int * a pointer to the stack offset variable
4899 *
4900 * Returns: 0 Success
4901 *		!0			Failure: errno
4902 *
4903 * Implicit returns:
4904 * (*stackp) The stack offset, modified
4905 *
4906 * Note:	The strings segment is laid out backward from the top
4907 *		of the stack, to consume the minimal amount of
4908 * space possible; the returned stack pointer points to the
4909 * end of the area consumed (stacks grow downward).
4910 *
4911 * argc is an int; arg[i] are pointers; env[i] are pointers;
4912 * the 0's are (void *)NULL's
4913 *
4914 * The stack frame layout is:
4915 *
4916 * +-------------+ <- p->user_stack
4917 * | 16b |
4918 * +-------------+
4919 * | STRING AREA |
4920 * | : |
4921 * | : |
4922 * | : |
4923 * +- -- -- -- --+
4924 * | PATH AREA |
4925 * +-------------+
4926 * | 0 |
4927 * +-------------+
4928 * | applev[n] |
4929 * +-------------+
4930 * :
4931 * :
4932 * +-------------+
4933 * | applev[1] |
4934 * +-------------+
4935 * | exec_path / |
4936 * | applev[0] |
4937 * +-------------+
4938 * | 0 |
4939 * +-------------+
4940 * | env[n] |
4941 * +-------------+
4942 * :
4943 * :
4944 * +-------------+
4945 * | env[0] |
4946 * +-------------+
4947 * | 0 |
4948 * +-------------+
4949 * | arg[argc-1] |
4950 * +-------------+
4951 * :
4952 * :
4953 * +-------------+
4954 * | arg[0] |
4955 * +-------------+
4956 * | argc |
4957 * sp-> +-------------+
4958 *
4959 * Although technically a part of the STRING AREA, we treat the PATH AREA as
4960 * a separate entity. This allows us to align the beginning of the PATH AREA
4961 * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers
4962 * which precede it on the stack are properly aligned.
4963 */
4964__attribute__((noinline))
4965static int
4966exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp)
4967{
4968 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
4969 int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
4970 int ptr_area_size;
4971 void *ptr_buffer_start, *ptr_buffer;
4972 size_t string_size;
4973
4974 user_addr_t string_area; /* *argv[], *env[] */
4975 user_addr_t ptr_area; /* argv[], env[], applev[] */
4976 user_addr_t argc_area; /* argc */
4977 user_addr_t stack;
4978 int error;
4979
4980 unsigned i;
4981 struct copyout_desc {
4982 char *start_string;
4983 int count;
4984#if CONFIG_DTRACE
4985 user_addr_t *dtrace_cookie;
4986#endif
4987 boolean_t null_term;
4988 } descriptors[] = {
4989 {
4990 .start_string = imgp->ip_startargv,
4991 .count = imgp->ip_argc,
4992#if CONFIG_DTRACE
4993 .dtrace_cookie = &p->p_dtrace_argv,
4994#endif
4995 .null_term = TRUE
4996 },
4997 {
4998 .start_string = imgp->ip_endargv,
4999 .count = imgp->ip_envc,
5000#if CONFIG_DTRACE
5001 .dtrace_cookie = &p->p_dtrace_envp,
5002#endif
5003 .null_term = TRUE
5004 },
5005 {
5006 .start_string = imgp->ip_strings,
5007 .count = 1,
5008#if CONFIG_DTRACE
5009 .dtrace_cookie = NULL,
5010#endif
5011 .null_term = FALSE
5012 },
5013 {
5014 .start_string = imgp->ip_endenvv,
5015 .count = imgp->ip_applec - 1, /* exec_path handled above */
5016#if CONFIG_DTRACE
5017 .dtrace_cookie = NULL,
5018#endif
5019 .null_term = TRUE
5020 }
5021 };
5022
5023 stack = *stackp;
5024
5025 /*
5026 * All previous contributors to the string area
5027 * should have aligned their sub-area
5028 */
5029 if (imgp->ip_strspace % ptr_size != 0) {
5030 error = EINVAL;
5031 goto bad;
5032 }
5033
5034 /* Grow the stack down for the strings we've been building up */
5035 string_size = imgp->ip_strendp - imgp->ip_strings;
5036 stack -= string_size;
5037 string_area = stack;
5038
5039 /*
5040	 * Need room for one pointer for each string, plus
5041	 * one NULL terminator each for the argv, envv, and apple areas.
5042 */
5043 ptr_area_size = (imgp->ip_argc + imgp->ip_envc + imgp->ip_applec + 3) * ptr_size;
5044 stack -= ptr_area_size;
5045 ptr_area = stack;
5046
5047 /* We'll construct all the pointer arrays in our string buffer,
5048 * which we already know is aligned properly, and ip_argspace
5049 * was used to verify we have enough space.
5050 */
5051 ptr_buffer_start = ptr_buffer = (void *)imgp->ip_strendp;
5052
5053 /*
5054 * Need room for pointer-aligned argc slot.
5055 */
5056 stack -= ptr_size;
5057 argc_area = stack;
5058
5059 /*
5060 * Record the size of the arguments area so that sysctl_procargs()
5061 * can return the argument area without having to parse the arguments.
5062 */
5063 proc_lock(p);
5064 p->p_argc = imgp->ip_argc;
5065 p->p_argslen = (int)(*stackp - string_area);
5066 proc_unlock(p);
5067
5068 /* Return the initial stack address: the location of argc */
5069 *stackp = stack;
5070
5071 /*
5072 * Copy out the entire strings area.
5073 */
5074 error = copyout(imgp->ip_strings, string_area,
5075 string_size);
5076 if (error) {
5077 goto bad;
5078 }
5079
5080 for (i = 0; i < sizeof(descriptors) / sizeof(descriptors[0]); i++) {
5081 char *cur_string = descriptors[i].start_string;
5082 int j;
5083
5084#if CONFIG_DTRACE
5085 if (descriptors[i].dtrace_cookie) {
5086 proc_lock(p);
5087 *descriptors[i].dtrace_cookie = ptr_area + ((uintptr_t)ptr_buffer - (uintptr_t)ptr_buffer_start); /* dtrace convenience */
5088 proc_unlock(p);
5089 }
5090#endif /* CONFIG_DTRACE */
5091
5092 /*
5093 * For each segment (argv, envv, applev), copy as many pointers as requested
5094 * to our pointer buffer.
5095 */
5096 for (j = 0; j < descriptors[i].count; j++) {
5097 user_addr_t cur_address = string_area + (cur_string - imgp->ip_strings);
5098
5099 /* Copy out the pointer to the current string. Alignment has been verified */
5100 if (ptr_size == 8) {
5101 *(uint64_t *)ptr_buffer = (uint64_t)cur_address;
5102 } else {
5103 *(uint32_t *)ptr_buffer = (uint32_t)cur_address;
5104 }
5105
5106 ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
5107 cur_string += strlen(cur_string) + 1; /* Only a NUL between strings in the same area */
5108 }
5109
5110 if (descriptors[i].null_term) {
5111 if (ptr_size == 8) {
5112 *(uint64_t *)ptr_buffer = 0ULL;
5113 } else {
5114 *(uint32_t *)ptr_buffer = 0;
5115 }
5116
5117 ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
5118 }
5119 }
5120
5121 /*
5122 * Copy out all our pointer arrays in bulk.
5123 */
5124 error = copyout(ptr_buffer_start, ptr_area,
5125 ptr_area_size);
5126 if (error) {
5127 goto bad;
5128 }
5129
5130 /* argc (int32, stored in a ptr_size area) */
5131 error = copyoutptr((user_addr_t)imgp->ip_argc, argc_area, ptr_size);
5132 if (error) {
5133 goto bad;
5134 }
5135
5136bad:
5137 return error;
5138}
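/*
 * Illustrative sketch (userland, not part of this file): how a process
 * start routine would walk the frame laid out in the diagram above
 * exec_copyout_strings(), given the initial stack pointer handed to the
 * main thread. Names here are hypothetical.
 *
 *	void
 *	start(void *sp)
 *	{
 *		long argc = *(long *)sp;	// argc occupies a ptr_size slot
 *		char **argv = (char **)sp + 1;
 *		char **envp = argv + argc + 1;	// +1 skips the argv[] NULL
 *		char **apple = envp;
 *		while (*apple != NULL) {	// walk past env[] entries
 *			apple++;
 *		}
 *		apple++;			// skip the env[] NULL
 *		// apple[0] is the exec path; apple[1..] are key=value strings
 *	}
 */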
5139
5140
5141/*
5142 * exec_extract_strings
5143 *
5144 * Copy arguments and environment from user space into work area; we may
5145 * have already copied some early arguments into the work area, and if
5146 * so, any arguments copied in are appended to those already there.
5147 * This function is the primary manipulator of ip_argspace, since
5148 * these are the arguments the client of execve(2) knows about. After
5149 * each argv[]/envv[] string is copied, we charge the string length
5150 * and argv[]/envv[] pointer slot to ip_argspace, so that we can
5151 * fully preflight the arg list size.
5152 *
5153 * Parameters: struct image_params * the image parameter block
5154 *
5155 * Returns: 0 Success
5156 * !0 Failure: errno
5157 *
5158 * Implicit returns:
5159 * (imgp->ip_argc) Count of arguments, updated
5160 * (imgp->ip_envc) Count of environment strings, updated
5161 * (imgp->ip_argspace) Count of remaining of NCARGS
5162 * (imgp->ip_interp_buffer) Interpreter and args (mutated in place)
5163 *
5164 *
5165 * Note: The argument and environment vectors are user space pointers
5166 * to arrays of user space pointers.
5167 */
5168__attribute__((noinline))
5169static int
5170exec_extract_strings(struct image_params *imgp)
5171{
5172 int error = 0;
5173 int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT_ADDR) ? 8 : 4;
5174 int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
5175 user_addr_t argv = imgp->ip_user_argv;
5176 user_addr_t envv = imgp->ip_user_envv;
5177
5178 /*
5179 * Adjust space reserved for the path name by however much padding it
5180 * needs. Doing this here since we didn't know if this would be a 32-
5181 * or 64-bit process back in exec_save_path.
5182 */
5183 while (imgp->ip_strspace % new_ptr_size != 0) {
5184 *imgp->ip_strendp++ = '\0';
5185 imgp->ip_strspace--;
5186 /* imgp->ip_argspace--; not counted towards exec args total */
5187 }
5188
5189 /*
5190 * From now on, we start attributing string space to ip_argspace
5191 */
5192 imgp->ip_startargv = imgp->ip_strendp;
5193 imgp->ip_argc = 0;
5194
5195 if ((imgp->ip_flags & IMGPF_INTERPRET) != 0) {
5196 user_addr_t arg;
5197 char *argstart, *ch;
5198
5199 /* First, the arguments in the "#!" string are tokenized and extracted. */
5200 argstart = imgp->ip_interp_buffer;
5201 while (argstart) {
5202 ch = argstart;
5203 while (*ch && !IS_WHITESPACE(*ch)) {
5204 ch++;
5205 }
5206
5207 if (*ch == '\0') {
5208 /* last argument, no need to NUL-terminate */
5209 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);
5210 argstart = NULL;
5211 } else {
5212 /* NUL-terminate */
5213 *ch = '\0';
5214 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);
5215
5216 /*
5217 * Find the next string. We know spaces at the end of the string have already
5218 * been stripped.
5219 */
5220 argstart = ch + 1;
5221 while (IS_WHITESPACE(*argstart)) {
5222 argstart++;
5223 }
5224 }
5225
5226 /* Error-check, regardless of whether this is the last interpreter arg or not */
5227 if (error) {
5228 goto bad;
5229 }
5230 if (imgp->ip_argspace < new_ptr_size) {
5231 error = E2BIG;
5232 goto bad;
5233 }
5234 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
5235 imgp->ip_argc++;
5236 }
5237
5238 if (argv != 0LL) {
5239 /*
5240			 * If we are running an interpreter, consume the av[0] that
5241			 * was passed to execve(); it is replaced below by the path
5242			 * name that was passed to execve(), for interpreters which
5243			 * do not use PATH to locate their script arguments.
5244 */
5245 error = copyinptr(argv, &arg, ptr_size);
5246 if (error) {
5247 goto bad;
5248 }
5249 if (arg != 0LL) {
5250 argv += ptr_size; /* consume without using */
5251 }
5252 }
5253
5254 if (imgp->ip_interp_sugid_fd != -1) {
5255 char temp[19]; /* "/dev/fd/" + 10 digits + NUL */
5256 snprintf(temp, sizeof(temp), "/dev/fd/%d", imgp->ip_interp_sugid_fd);
5257 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(temp), UIO_SYSSPACE, TRUE);
5258 } else {
5259 error = exec_add_user_string(imgp, imgp->ip_user_fname, imgp->ip_seg, TRUE);
5260 }
5261
5262 if (error) {
5263 goto bad;
5264 }
5265 if (imgp->ip_argspace < new_ptr_size) {
5266 error = E2BIG;
5267 goto bad;
5268 }
5269 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
5270 imgp->ip_argc++;
5271 }
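	/*
	 * Worked example: for a script /usr/local/bin/hello.sh whose first
	 * line is "#!/bin/sh -x", invoked as execve("/usr/local/bin/hello.sh",
	 * {"hello.sh", "arg1", NULL}, envp), the code above yields
	 *
	 *	argv[0] = "/bin/sh"			(from the interpreter line)
	 *	argv[1] = "-x"				(tokenized interpreter argument)
	 *	argv[2] = "/usr/local/bin/hello.sh"	(replaces "hello.sh")
	 *	argv[3] = "arg1"			(copied by the loop below)
	 *
	 * with /dev/fd/N substituted for the script path in the
	 * interp_sugid_fd case handled above.
	 */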
5272
5273 while (argv != 0LL) {
5274 user_addr_t arg;
5275
5276 error = copyinptr(argv, &arg, ptr_size);
5277 if (error) {
5278 goto bad;
5279 }
5280
5281 if (arg == 0LL) {
5282 break;
5283 }
5284
5285 argv += ptr_size;
5286
5287 /*
5288 * av[n...] = arg[n]
5289 */
5290 error = exec_add_user_string(imgp, arg, imgp->ip_seg, TRUE);
5291 if (error) {
5292 goto bad;
5293 }
5294 if (imgp->ip_argspace < new_ptr_size) {
5295 error = E2BIG;
5296 goto bad;
5297 }
5298 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
5299 imgp->ip_argc++;
5300 }
5301
5302 /* Save space for argv[] NULL terminator */
5303 if (imgp->ip_argspace < new_ptr_size) {
5304 error = E2BIG;
5305 goto bad;
5306 }
5307 imgp->ip_argspace -= new_ptr_size;
5308
5309	/* Note where the args end and the env begins. */
5310 imgp->ip_endargv = imgp->ip_strendp;
5311 imgp->ip_envc = 0;
5312
5313 /* Now, get the environment */
5314 while (envv != 0LL) {
5315 user_addr_t env;
5316
5317 error = copyinptr(envv, &env, ptr_size);
5318 if (error) {
5319 goto bad;
5320 }
5321
5322 envv += ptr_size;
5323 if (env == 0LL) {
5324 break;
5325 }
5326 /*
5327 * av[n...] = env[n]
5328 */
5329 error = exec_add_user_string(imgp, env, imgp->ip_seg, TRUE);
5330 if (error) {
5331 goto bad;
5332 }
5333 if (imgp->ip_argspace < new_ptr_size) {
5334 error = E2BIG;
5335 goto bad;
5336 }
5337 imgp->ip_argspace -= new_ptr_size; /* to hold envv[] entry */
5338 imgp->ip_envc++;
5339 }
5340
5341 /* Save space for envv[] NULL terminator */
5342 if (imgp->ip_argspace < new_ptr_size) {
5343 error = E2BIG;
5344 goto bad;
5345 }
5346 imgp->ip_argspace -= new_ptr_size;
5347
5348 /* Align the tail of the combined argv+envv area */
5349 while (imgp->ip_strspace % new_ptr_size != 0) {
5350 if (imgp->ip_argspace < 1) {
5351 error = E2BIG;
5352 goto bad;
5353 }
5354 *imgp->ip_strendp++ = '\0';
5355 imgp->ip_strspace--;
5356 imgp->ip_argspace--;
5357 }
5358
5359 /* Note where the envv ends and applev begins. */
5360 imgp->ip_endenvv = imgp->ip_strendp;
5361
5362 /*
5363 * From now on, we are no longer charging argument
5364 * space to ip_argspace.
5365 */
5366
5367bad:
5368 return error;
5369}
5370
5371/*
5372 * Libc has an 8-element array set up for stack guard values. It only fills
5373 * in one of those entries, and both gcc and llvm seem to use only a single
5374 * 8-byte guard. Until somebody needs more than an 8-byte guard value, don't
5375 * do the work to construct them.
5376 */
5377#define GUARD_VALUES 1
5378#define GUARD_KEY "stack_guard="
5379
5380/*
5381 * System malloc needs some entropy when it is initialized.
5382 */
5383#define ENTROPY_VALUES 2
5384#define ENTROPY_KEY "malloc_entropy="
5385
5386/*
5387 * libplatform needs a random pointer-obfuscation value when it is initialized.
5388 */
5389#define PTR_MUNGE_VALUES 1
5390#define PTR_MUNGE_KEY "ptr_munge="
5391
5392/*
5393 * System malloc engages nanozone for UIAPP.
5394 */
5395#define NANO_ENGAGE_KEY "MallocNanoZone=1"
5396
5397#define PFZ_KEY "pfz="
5398extern user32_addr_t commpage_text32_location;
5399extern user64_addr_t commpage_text64_location;
5400
5401extern uuid_string_t bootsessionuuid_string;
5402
5403#define MAIN_STACK_VALUES 4
5404#define MAIN_STACK_KEY "main_stack="
5405
5406#define FSID_KEY "executable_file="
5407#define DYLD_FSID_KEY "dyld_file="
5408#define CDHASH_KEY "executable_cdhash="
5409#define DYLD_FLAGS_KEY "dyld_flags="
5410#define SUBSYSTEM_ROOT_PATH_KEY "subsystem_root_path="
5411#define APP_BOOT_SESSION_KEY "executable_boothash="
5412#if __has_feature(ptrauth_calls)
5413#define PTRAUTH_DISABLED_FLAG "ptrauth_disabled=1"
5414#define DYLD_ARM64E_ABI_KEY "arm64e_abi="
5415#endif /* __has_feature(ptrauth_calls) */
5416#define MAIN_TH_PORT_KEY "th_port="
5417
5418#define FSID_MAX_STRING "0x1234567890abcdef,0x1234567890abcdef"
5419
5420#define HEX_STR_LEN 18 // 64-bit hex value "0x0123456701234567"
5421#define HEX_STR_LEN32 10 // 32-bit hex value "0x01234567"
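/*
 * Illustrative sample (exact entries vary by platform, binary, and boot)
 * of the apple[] vector these keys produce, as seen by userland past envp:
 *
 *	apple[0]  "executable_path=/usr/bin/true"
 *	apple[1]  "pfz=0x7ffffff12000"
 *	apple[2]  "stack_guard=0x64ac1b0a8f3700cd"
 *	apple[3]  "malloc_entropy=0x...,0x..."
 *	apple[4]  "ptr_munge=0x..."
 *	apple[5]  "main_stack=0x...,0x...,0x...,0x..."
 *	...
 */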
5422
5423static int
5424exec_add_entropy_key(struct image_params *imgp,
5425 const char *key,
5426 int values,
5427 boolean_t embedNUL)
5428{
5429 const int limit = 8;
5430 uint64_t entropy[limit];
5431 char str[strlen(key) + (HEX_STR_LEN + 1) * limit + 1];
5432 if (values > limit) {
5433 values = limit;
5434 }
5435
5436 read_random(entropy, sizeof(entropy[0]) * values);
5437
5438 if (embedNUL) {
5439 entropy[0] &= ~(0xffull << 8);
5440 }
5441
5442 int len = scnprintf(str, sizeof(str), "%s0x%llx", key, entropy[0]);
5443 size_t remaining = sizeof(str) - len;
5444 for (int i = 1; i < values && remaining > 0; ++i) {
5445 size_t start = sizeof(str) - remaining;
5446 len = scnprintf(&str[start], remaining, ",0x%llx", entropy[i]);
5447 remaining -= len;
5448 }
5449
5450 return exec_add_user_string(imgp, CAST_USER_ADDR_T(str), UIO_SYSSPACE, FALSE);
5451}
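/*
 * Worked example: exec_add_entropy_key(imgp, ENTROPY_KEY, 2, FALSE)
 * pushes an apple[] entry of the form
 *
 *	"malloc_entropy=0x9fe3a1b24c87d65,0x1b2c3d4e5f60718"
 *
 * (values are random on each exec). With embedNUL, as used for GUARD_KEY,
 * the second-lowest byte of the first value is cleared so the guard
 * embeds a NUL and also frustrates C-string overreads.
 */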
5452
5453/*
5454 * Build up the contents of the apple[] string vector
5455 */
5456#if (DEVELOPMENT || DEBUG)
5457extern uint64_t dyld_flags;
5458#endif
5459
5460#if __has_feature(ptrauth_calls)
5461static inline bool
5462is_arm64e_running_as_arm64(const struct image_params *imgp)
5463{
5464 return (imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E &&
5465 (imgp->ip_flags & IMGPF_NOJOP);
5466}
5467#endif /* __has_feature(ptrauth_calls) */
5468
5469static int
5470exec_add_apple_strings(struct image_params *imgp,
5471 const load_result_t *load_result)
5472{
5473 int error;
5474 int img_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
5475 thread_t new_thread;
5476 ipc_port_t sright;
5477
5478 /* exec_save_path stored the first string */
5479 imgp->ip_applec = 1;
5480
5481 /* adding the pfz string */
5482 {
5483 char pfz_string[strlen(PFZ_KEY) + HEX_STR_LEN + 1];
5484
5485 if (img_ptr_size == 8) {
5486 __assert_only size_t ret = snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%llx", commpage_text64_location);
5487 assert(ret < sizeof(pfz_string));
5488 } else {
5489 snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%x", commpage_text32_location);
5490 }
5491 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(pfz_string), UIO_SYSSPACE, FALSE);
5492 if (error) {
5493 printf("Failed to add the pfz string with error %d\n", error);
5494 goto bad;
5495 }
5496 imgp->ip_applec++;
5497 }
5498
5499 /* adding the NANO_ENGAGE_KEY key */
5500 if (imgp->ip_px_sa) {
5501 int proc_flags = (((struct _posix_spawnattr *) imgp->ip_px_sa)->psa_flags);
5502
5503 if ((proc_flags & _POSIX_SPAWN_NANO_ALLOCATOR) == _POSIX_SPAWN_NANO_ALLOCATOR) {
5504 const char *nano_string = NANO_ENGAGE_KEY;
5505 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(nano_string), UIO_SYSSPACE, FALSE);
5506 if (error) {
5507 goto bad;
5508 }
5509 imgp->ip_applec++;
5510 }
5511 }
5512
5513 /*
5514 * Supply libc with a collection of random values to use when
5515 * implementing -fstack-protector.
5516 *
5517 * (The first random string always contains an embedded NUL so that
5518 * __stack_chk_guard also protects against C string vulnerabilities)
5519 */
5520 error = exec_add_entropy_key(imgp, GUARD_KEY, GUARD_VALUES, TRUE);
5521 if (error) {
5522 goto bad;
5523 }
5524 imgp->ip_applec++;
5525
5526 /*
5527 * Supply libc with entropy for system malloc.
5528 */
5529 error = exec_add_entropy_key(imgp, ENTROPY_KEY, ENTROPY_VALUES, FALSE);
5530 if (error) {
5531 goto bad;
5532 }
5533 imgp->ip_applec++;
5534
5535 /*
5536 * Supply libpthread & libplatform with a random value to use for pointer
5537 * obfuscation.
5538 */
5539 error = exec_add_entropy_key(imgp, PTR_MUNGE_KEY, PTR_MUNGE_VALUES, FALSE);
5540 if (error) {
5541 goto bad;
5542 }
5543 imgp->ip_applec++;
5544
5545 /*
5546 * Add MAIN_STACK_KEY: Supplies the address and size of the main thread's
5547 * stack if it was allocated by the kernel.
5548 *
5549 * The guard page is not included in this stack size as libpthread
5550 * expects to add it back in after receiving this value.
5551 */
5552 if (load_result->unixproc) {
5553 char stack_string[strlen(MAIN_STACK_KEY) + (HEX_STR_LEN + 1) * MAIN_STACK_VALUES + 1];
5554 snprintf(stack_string, sizeof(stack_string),
5555 MAIN_STACK_KEY "0x%llx,0x%llx,0x%llx,0x%llx",
5556 (uint64_t)load_result->user_stack,
5557 (uint64_t)load_result->user_stack_size,
5558 (uint64_t)load_result->user_stack_alloc,
5559 (uint64_t)load_result->user_stack_alloc_size);
5560 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(stack_string), UIO_SYSSPACE, FALSE);
5561 if (error) {
5562 goto bad;
5563 }
5564 imgp->ip_applec++;
5565 }
5566
5567 if (imgp->ip_vattr) {
5568 uint64_t fsid = vnode_get_va_fsid(imgp->ip_vattr);
5569 uint64_t fsobjid = imgp->ip_vattr->va_fileid;
5570
5571 char fsid_string[strlen(FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
5572 snprintf(fsid_string, sizeof(fsid_string),
5573 FSID_KEY "0x%llx,0x%llx", fsid, fsobjid);
5574 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
5575 if (error) {
5576 goto bad;
5577 }
5578 imgp->ip_applec++;
5579 }
5580
5581 if (imgp->ip_dyld_fsid || imgp->ip_dyld_fsobjid) {
5582 char fsid_string[strlen(DYLD_FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
5583 snprintf(fsid_string, sizeof(fsid_string),
5584 DYLD_FSID_KEY "0x%llx,0x%llx", imgp->ip_dyld_fsid, imgp->ip_dyld_fsobjid);
5585 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
5586 if (error) {
5587 goto bad;
5588 }
5589 imgp->ip_applec++;
5590 }
5591
5592 uint8_t cdhash[SHA1_RESULTLEN];
5593 int cdhash_error = ubc_cs_getcdhash(imgp->ip_vp, imgp->ip_arch_offset, cdhash);
5594 if (cdhash_error == 0) {
5595 char hash_string[strlen(CDHASH_KEY) + 2 * SHA1_RESULTLEN + 1];
5596 strncpy(hash_string, CDHASH_KEY, sizeof(hash_string));
5597 char *p = hash_string + sizeof(CDHASH_KEY) - 1;
5598 for (int i = 0; i < SHA1_RESULTLEN; i++) {
5599 snprintf(p, 3, "%02x", (int) cdhash[i]);
5600 p += 2;
5601 }
5602 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(hash_string), UIO_SYSSPACE, FALSE);
5603 if (error) {
5604 goto bad;
5605 }
5606 imgp->ip_applec++;
5607
5608 /* hash together cd-hash and boot-session-uuid */
5609 uint8_t sha_digest[SHA256_DIGEST_LENGTH];
5610 SHA256_CTX sha_ctx;
5611 SHA256_Init(&sha_ctx);
5612 SHA256_Update(&sha_ctx, bootsessionuuid_string, sizeof(bootsessionuuid_string));
5613 SHA256_Update(&sha_ctx, cdhash, sizeof(cdhash));
5614 SHA256_Final(sha_digest, &sha_ctx);
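/*
 * Only the first SHA1_RESULTLEN (20) bytes of the 32-byte SHA256
 * digest are hex-encoded below, matching the buffer size.
 */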
5615 char app_boot_string[strlen(APP_BOOT_SESSION_KEY) + 2 * SHA1_RESULTLEN + 1];
5616 strncpy(app_boot_string, APP_BOOT_SESSION_KEY, sizeof(app_boot_string));
5617 char *s = app_boot_string + sizeof(APP_BOOT_SESSION_KEY) - 1;
5618 for (int i = 0; i < SHA1_RESULTLEN; i++) {
5619 snprintf(s, 3, "%02x", (int) sha_digest[i]);
5620 s += 2;
5621 }
5622 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(app_boot_string), UIO_SYSSPACE, FALSE);
5623 if (error) {
5624 goto bad;
5625 }
5626 imgp->ip_applec++;
5627 }
5628#if (DEVELOPMENT || DEBUG)
5629 if (dyld_flags) {
5630 char dyld_flags_string[strlen(DYLD_FLAGS_KEY) + HEX_STR_LEN + 1];
5631 snprintf(dyld_flags_string, sizeof(dyld_flags_string), DYLD_FLAGS_KEY "0x%llx", dyld_flags);
5632 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(dyld_flags_string), UIO_SYSSPACE, FALSE);
5633 if (error) {
5634 goto bad;
5635 }
5636 imgp->ip_applec++;
5637 }
5638#endif
5639 if (imgp->ip_subsystem_root_path) {
5640 size_t buffer_len = MAXPATHLEN + strlen(SUBSYSTEM_ROOT_PATH_KEY);
5641 char subsystem_root_path_string[buffer_len];
5642 int required_len = snprintf(subsystem_root_path_string, buffer_len, SUBSYSTEM_ROOT_PATH_KEY "%s", imgp->ip_subsystem_root_path);
5643
5644 if (((size_t)required_len >= buffer_len) || (required_len < 0)) {
5645 error = ENAMETOOLONG;
5646 goto bad;
5647 }
5648
5649 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(subsystem_root_path_string), UIO_SYSSPACE, FALSE);
5650 if (error) {
5651 goto bad;
5652 }
5653
5654 imgp->ip_applec++;
5655 }
5656#if __has_feature(ptrauth_calls)
5657 if (is_arm64e_running_as_arm64(imgp)) {
5658 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(PTRAUTH_DISABLED_FLAG), UIO_SYSSPACE, FALSE);
5659 if (error) {
5660 goto bad;
5661 }
5662
5663 imgp->ip_applec++;
5664 }
5665#endif /* __has_feature(ptrauth_calls) */
5666
5667
5668#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
5669 {
5670 char dyld_abi_string[strlen(DYLD_ARM64E_ABI_KEY) + 8];
5671 strlcpy(dyld_abi_string, DYLD_ARM64E_ABI_KEY, sizeof(dyld_abi_string));
5672 bool allowAll = bootarg_arm64e_preview_abi;
5673 strlcat(dyld_abi_string, (allowAll ? "all" : "os"), sizeof(dyld_abi_string));
5674 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(dyld_abi_string), UIO_SYSSPACE, FALSE);
5675 if (error) {
5676 goto bad;
5677 }
5678
5679 imgp->ip_applec++;
5680 }
5681#endif
5682 /*
5683 * Add main thread mach port name
5684 * Takes a +1 uref on the main thread port; this ref is extracted by libpthread in
5685 * __pthread_init and consumed in _bsdthread_terminate. The port name leaks if the
5686 * process is not linked against libpthread.
5687 */
5688 if ((new_thread = imgp->ip_new_thread) != THREAD_NULL) {
5689 thread_reference(new_thread);
5690 sright = convert_thread_to_port_pinned(new_thread);
5691 task_t new_task = get_threadtask(new_thread);
5692 mach_port_name_t name = ipc_port_copyout_send(sright, get_task_ipcspace(new_task));
5693 char port_name_hex_str[strlen(MAIN_TH_PORT_KEY) + HEX_STR_LEN32 + 1];
5694 snprintf(port_name_hex_str, sizeof(port_name_hex_str), MAIN_TH_PORT_KEY "0x%x", name);
5695
5696 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(port_name_hex_str), UIO_SYSSPACE, FALSE);
5697 if (error) {
5698 goto bad;
5699 }
5700 imgp->ip_applec++;
5701 }
5702
5703 /* Align the tail of the combined applev area */
5704 while (imgp->ip_strspace % img_ptr_size != 0) {
5705 *imgp->ip_strendp++ = '\0';
5706 imgp->ip_strspace--;
5707 }
5708
5709bad:
5710 return error;
5711}
5712
5713/*
5714 * exec_check_permissions
5715 *
5716 * Description: Verify that the file that is being attempted to be executed
5717 * is in fact allowed to be executed based on its POSIX file
5718 * permissions and other access control criteria
5719 *
5720 * Parameters: struct image_params * the image parameter block
5721 *
5722 * Returns: 0 Success
5723 * EACCES Permission denied
5724 * ENOEXEC Executable file format error
5725 * ETXTBSY Text file busy [misuse of error code]
5726 * vnode_getattr:???
5727 * vnode_authorize:???
5728 */
5729static int
5730exec_check_permissions(struct image_params *imgp)
5731{
5732 struct vnode *vp = imgp->ip_vp;
5733 struct vnode_attr *vap = imgp->ip_vattr;
5734 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
5735 int error;
5736 kauth_action_t action;
5737
5738 /* Only allow execution of regular files */
5739 if (!vnode_isreg(vp)) {
5740 return EACCES;
5741 }
5742
5743 /* Get the file attributes that we will be using here and elsewhere */
5744 VATTR_INIT(vap);
5745 VATTR_WANTED(vap, va_uid);
5746 VATTR_WANTED(vap, va_gid);
5747 VATTR_WANTED(vap, va_mode);
5748 VATTR_WANTED(vap, va_fsid);
5749 VATTR_WANTED(vap, va_fsid64);
5750 VATTR_WANTED(vap, va_fileid);
5751 VATTR_WANTED(vap, va_data_size);
5752 if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0) {
5753 return error;
5754 }
5755
5756 /*
5757 * Ensure that at least one execute bit is on - otherwise root
5758 * will always succeed, and we don't want that to happen unless
5759 * the file really is executable.
5760 */
5761 if (!vfs_authopaque(vnode_mount(vp)) && ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) {
5762 return EACCES;
5763 }
5764
5765 /* Disallow zero length files */
5766 if (vap->va_data_size == 0) {
5767 return ENOEXEC;
5768 }
5769
5770 imgp->ip_arch_offset = (user_size_t)0;
5771#if __LP64__
5772 imgp->ip_arch_size = vap->va_data_size;
5773#else
5774 if (vap->va_data_size > UINT32_MAX) {
5775 return ENOEXEC;
5776 }
5777 imgp->ip_arch_size = (user_size_t)vap->va_data_size;
5778#endif
5779
5780 /* Disable setuid-ness for traced programs or if MNT_NOSUID */
5781 if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED)) {
5782 vap->va_mode &= ~(VSUID | VSGID);
5783 }
5784
5785 /*
5786 * Disable _POSIX_SPAWN_ALLOW_DATA_EXEC and _POSIX_SPAWN_DISABLE_ASLR
5787 * flags for setuid/setgid binaries.
5788 */
5789 if (vap->va_mode & (VSUID | VSGID)) {
5790 imgp->ip_flags &= ~(IMGPF_ALLOW_DATA_EXEC | IMGPF_DISABLE_ASLR);
5791 }
5792
5793#if CONFIG_MACF
5794 error = mac_vnode_check_exec(imgp->ip_vfs_context, vp, imgp);
5795 if (error) {
5796 return error;
5797 }
5798#endif
5799
5800 /* Check for execute permission */
5801 action = KAUTH_VNODE_EXECUTE;
5802 /* Traced images must also be readable */
5803 if (p->p_lflag & P_LTRACED) {
5804 action |= KAUTH_VNODE_READ_DATA;
5805 }
5806 if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0) {
5807 return error;
5808 }
5809
5810#if 0
5811 /* Don't let it run if anyone had it open for writing */
5812 vnode_lock(vp);
5813 if (vp->v_writecount) {
5814 panic("going to return ETXTBSY %x", vp);
5815 vnode_unlock(vp);
5816 return ETXTBSY;
5817 }
5818 vnode_unlock(vp);
5819#endif
5820
5821 /* XXX May want to indicate to underlying FS that vnode is open */
5822
5823 return error;
5824}
5825
5826
5827/*
5828 * exec_handle_sugid
5829 *
5830 * Initially clear the P_SUGID in the process flags; if an SUGID process is
5831 * exec'ing a non-SUGID image, then this is the point of no return.
5832 *
5833 * If the image being activated is SUGID, then replace the credential with a
5834 * copy, disable tracing (unless the tracing process is root), reset the
5835 * mach task port to revoke it, and set the P_SUGID bit.
5836 *
5837 * If the saved user and group ID will be changing, then make sure it happens
5838 * to a new credential, rather than a shared one.
5839 *
5840 * Set the security token (this is probably obsolete, given that the token
5841 * should not technically be separate from the credential itself).
5842 *
5843 * Parameters: struct image_params * the image parameter block
5844 *
5845 * Returns: void No failure indication
5846 *
5847 * Implicit returns:
5848 * <process credential> Potentially modified/replaced
5849 * <task port> Potentially revoked
5850 * <process flags> P_SUGID bit potentially modified
5851 * <security token> Potentially modified
5852 */
5853__attribute__((noinline))
5854static int
5855exec_handle_sugid(struct image_params *imgp)
5856{
5857 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
5858 kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
5859 int i;
5860 int leave_sugid_clear = 0;
5861 int mac_reset_ipc = 0;
5862 int error = 0;
5863 task_t task = NULL;
5864#if CONFIG_MACF
5865 int mac_transition, disjoint_cred = 0;
5866 int label_update_return = 0;
5867
5868 /*
5869 * Determine whether a call to update the MAC label will result in the
5870 * credential changing.
5871 *
5872 * Note: MAC policies which do not actually end up modifying
5873 * the label subsequently are strongly encouraged to
5874 * return 0 for this check, since a non-zero answer will
5875 * slow down the exec fast path for normal binaries.
5876 */
5877 mac_transition = mac_cred_check_label_update_execve(
5878 imgp->ip_vfs_context,
5879 imgp->ip_vp,
5880 imgp->ip_arch_offset,
5881 imgp->ip_scriptvp,
5882 imgp->ip_scriptlabelp,
5883 imgp->ip_execlabelp,
5884 p,
5885 imgp->ip_px_smpx);
5886#endif
5887
5888 OSBitAndAtomic(~((uint32_t)P_SUGID), &p->p_flag);
5889
5890 /*
5891 * Order of the following is important; group checks must go last,
5892 * as we use the success of the 'ismember' check combined with the
5893 * failure of the explicit match to indicate that we will be setting
5894 * the egid of the process even though the new process did not
5895 * require VSUID/VSGID bits in order for it to set the new group as
5896 * its egid.
5897 *
5898 * Note: Technically, by this we are implying a call to
5899 * setegid() in the new process, rather than implying
5900 * it used its VSGID bit to set the effective group,
5901 * even though there is no code in that process to make
5902 * such a call.
5903 */
5904 if (((imgp->ip_origvattr->va_mode & VSUID) != 0 &&
5905 kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) ||
5906 ((imgp->ip_origvattr->va_mode & VSGID) != 0 &&
5907 ((kauth_cred_ismember_gid(cred, imgp->ip_origvattr->va_gid, &leave_sugid_clear) || !leave_sugid_clear) ||
5908 (kauth_cred_getgid(cred) != imgp->ip_origvattr->va_gid))) ||
5909 (imgp->ip_sc_port != NULL)) {
5910#if CONFIG_MACF
5911/* label for MAC transition and neither VSUID nor VSGID */
5912handle_mac_transition:
5913#endif
5914
5915#if CONFIG_SETUID
5916 /*
5917 * Replace the credential with a copy of itself if euid or
5918 * egid change.
5919 *
5920 * Note: setuid binaries will automatically opt out of
5921 * group resolver participation as a side effect
5922 * of this operation. This is an intentional
5923 * part of the security model, which requires a
5924 * participating credential be established by
5925 * escalating privilege, setting up all other
5926 * aspects of the credential including whether
5927 * or not to participate in external group
5928 * membership resolution, then dropping their
5929 * effective privilege to that of the desired
5930 * final credential state.
5931 *
5932 * Modifications to p_ucred must be guarded using the
5933 * proc's ucred lock. This prevents others from accessing
5934 * a garbage credential.
5935 */
5936
5937 if (imgp->ip_sc_port != NULL) {
5938 extern int suid_cred_verify(ipc_port_t, vnode_t, uint32_t *);
5939 int ret = -1;
5940 uid_t uid = UINT32_MAX;
5941
5942 /*
5943 * Check that the vnodes match. If a script is being
5944 * executed check the script's vnode rather than the
5945 * interpreter's.
5946 */
5947 struct vnode *vp = imgp->ip_scriptvp != NULL ? imgp->ip_scriptvp : imgp->ip_vp;
5948
5949 ret = suid_cred_verify(imgp->ip_sc_port, vp, &uid);
5950 if (ret == 0) {
5951 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
5952 return kauth_cred_setresuid(my_cred,
5953 KAUTH_UID_NONE,
5954 uid,
5955 uid,
5956 KAUTH_UID_NONE);
5957 });
5958 } else {
5959 error = EPERM;
5960 }
5961 }
5962
5963 if (imgp->ip_origvattr->va_mode & VSUID) {
5964 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
5965 return kauth_cred_setresuid(my_cred,
5966 KAUTH_UID_NONE,
5967 imgp->ip_origvattr->va_uid,
5968 imgp->ip_origvattr->va_uid,
5969 KAUTH_UID_NONE);
5970 });
5971 }
5972
5973 if (imgp->ip_origvattr->va_mode & VSGID) {
5974 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
5975 return kauth_cred_setresgid(my_cred,
5976 KAUTH_GID_NONE,
5977 imgp->ip_origvattr->va_gid,
5978 imgp->ip_origvattr->va_gid);
5979 });
5980 }
5981#endif /* CONFIG_SETUID */
5982
5983#if CONFIG_MACF
5984 /*
5985 * If a policy has indicated that it will transition the label,
5986 * before making the call into the MAC policies, get a new
5987 * duplicate credential, so they can modify it without
5988 * modifying any others sharing it.
5989 */
5990 if (mac_transition) {
5991 /*
5992 * This hook may generate upcalls that require
5993 * importance donation from the kernel.
5994 * (23925818)
5995 */
5996 thread_t thread = current_thread();
5997 thread_enable_send_importance(thread, TRUE);
5998 kauth_proc_label_update_execve(p,
5999 imgp->ip_vfs_context,
6000 imgp->ip_vp,
6001 imgp->ip_arch_offset,
6002 imgp->ip_scriptvp,
6003 imgp->ip_scriptlabelp,
6004 imgp->ip_execlabelp,
6005 &imgp->ip_csflags,
6006 imgp->ip_px_smpx,
6007 &disjoint_cred, /* will be non zero if disjoint */
6008 &label_update_return);
6009 thread_enable_send_importance(thread, FALSE);
6010
6011 if (disjoint_cred) {
6012 /*
6013 * If updating the MAC label resulted in a
6014 * disjoint credential, flag that we need to
6015 * set the P_SUGID bit. This protects
6016 * against debuggers being attached by an
6017 * insufficiently privileged process onto the
6018 * result of a transition to a more privileged
6019 * credential.
6020 */
6021 leave_sugid_clear = 0;
6022 }
6023
6024 imgp->ip_mac_return = label_update_return;
6025 }
6026
6027 mac_reset_ipc = mac_proc_check_inherit_ipc_ports(p, p->p_textvp, p->p_textoff, imgp->ip_vp, imgp->ip_arch_offset, imgp->ip_scriptvp);
6028
6029#endif /* CONFIG_MACF */
6030
6031 /*
6032 * If 'leave_sugid_clear' is non-zero, then we passed the
6033 * VSUID and MACF checks, and successfully determined that
6034 * the previous cred was a member of the VSGID group, but
6035 * that it was not the default at the time of the execve,
6036 * and that the post-labelling credential was not disjoint.
6037 * So we don't set the P_SUGID or reset mach ports and fds
6038 * on the basis of simply running this code.
6039 */
6040 if (mac_reset_ipc || !leave_sugid_clear) {
6041 /*
6042 * Have mach reset the task and thread ports.
6043 * We don't want anyone who had the ports before
6044 * a setuid exec to be able to access/control the
6045 * task/thread after.
6046 */
6047 ipc_task_reset((imgp->ip_new_thread != NULL) ?
6048 get_threadtask(imgp->ip_new_thread) : p->task);
6049 ipc_thread_reset((imgp->ip_new_thread != NULL) ?
6050 imgp->ip_new_thread : current_thread());
6051 }
6052
6053 if (!leave_sugid_clear) {
6054 /*
6055 * Flag the process as setuid.
6056 */
6057 OSBitOrAtomic(P_SUGID, &p->p_flag);
6058
6059 /*
6060 * Radar 2261856; setuid security hole fix
6061 * XXX For setuid processes, attempt to ensure that
6062 * stdin, stdout, and stderr are already allocated.
6063 * We do not want userland to accidentally allocate
6064 * descriptors in this range which has implied meaning
6065 * to libc.
6066 */
6067 for (i = 0; i < 3; i++) {
6068 if (fp_get_noref_locked(p, i) != NULL) {
6069 continue;
6070 }
6071
6072 /*
6073 * Do the kernel equivalent of
6074 *
6075 * if i == 0
6076 * (void) open("/dev/null", O_RDONLY);
6077 * else
6078 * (void) open("/dev/null", O_WRONLY);
6079 */
6080
6081 struct fileproc *fp;
6082 int indx;
6083 int flag;
6084 struct nameidata *ndp = NULL;
6085
6086 if (i == 0) {
6087 flag = FREAD;
6088 } else {
6089 flag = FWRITE;
6090 }
6091
6092 if ((error = falloc(p,
6093 &fp, &indx, imgp->ip_vfs_context)) != 0) {
6094 continue;
6095 }
6096
6097 ndp = kheap_alloc(KHEAP_TEMP,
6098 sizeof(*ndp), Z_WAITOK | Z_ZERO);
6099 if (ndp == NULL) {
6100 fp_free(p, indx, fp);
6101 error = ENOMEM;
6102 break;
6103 }
6104
6105 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
6106 CAST_USER_ADDR_T("/dev/null"),
6107 imgp->ip_vfs_context);
6108
6109 if ((error = vn_open(ndp, flag, 0)) != 0) {
6110 fp_free(p, indx, fp);
6111 kheap_free(KHEAP_TEMP, ndp, sizeof(*ndp));
6112 break;
6113 }
6114
6115 struct fileglob *fg = fp->fp_glob;
6116
6117 fg->fg_flag = flag;
6118 fg->fg_ops = &vnops;
6119 fg->fg_data = ndp->ni_vp;
6120
6121 vnode_put(ndp->ni_vp);
6122
6123 proc_fdlock(p);
6124 procfdtbl_releasefd(p, indx, NULL);
6125 fp_drop(p, indx, fp, 1);
6126 proc_fdunlock(p);
6127
6128 kheap_free(KHEAP_TEMP, ndp, sizeof(*ndp));
6129 }
6130 }
6131 }
6132#if CONFIG_MACF
6133 else {
6134 /*
6135 * We are here because we were told that the MAC label will
6136 * be transitioned, and the binary is not VSUID or VSGID; to
6137 * deal with this case, we could either duplicate a lot of
6138 * code, or we can indicate we want to default the P_SUGID
6139 * bit clear and jump back up.
6140 */
6141 if (mac_transition) {
6142 leave_sugid_clear = 1;
6143 goto handle_mac_transition;
6144 }
6145 }
6146
6147#endif /* CONFIG_MACF */
6148
6149 /*
6150 * Implement the semantic where the effective user and group become
6151 * the saved user and group in exec'ed programs.
6152 *
6153 * Modifications to p_ucred must be guarded using the
6154 * proc's ucred lock. This prevents others from accessing
6155 * a garbage credential.
6156 */
6157 apply_kauth_cred_update(p, ^kauth_cred_t (kauth_cred_t my_cred) {
6158 return kauth_cred_setsvuidgid(my_cred,
6159 kauth_cred_getuid(my_cred),
6160 kauth_cred_getgid(my_cred));
6161 });
6162
6163 /* Update the process' identity version and set the security token */
6164 p->p_idversion = OSIncrementAtomic(&nextpidversion);
6165
6166 if (imgp->ip_new_thread != NULL) {
6167 task = get_threadtask(imgp->ip_new_thread);
6168 } else {
6169 task = p->task;
6170 }
6171 set_security_token_task_internal(p, task);
6172
6173 return error;
6174}
6175
6176
6177/*
6178 * create_unix_stack
6179 *
6180 * Description: Set the user stack address for the process to the provided
6181 * address. If a custom stack was not set as a result of the
6182 * load process (i.e. as specified by the image file for the
6183 * executable), then allocate the stack in the provided map and
6184 * set up appropriate guard pages for enforcing administrative
6185 * limits on stack growth, if they end up being needed.
6186 *
6187 * Parameters: p Process to set stack on
6188 * load_result Information from mach-o load commands
6189 * map Address map in which to allocate the new stack
6190 *
6191 * Returns: KERN_SUCCESS Stack successfully created
6192 * !KERN_SUCCESS Mach failure code
6193 */
6194__attribute__((noinline))
6195static kern_return_t
6196create_unix_stack(vm_map_t map, load_result_t* load_result,
6197 proc_t p)
6198{
6199 mach_vm_size_t size, prot_size;
6200 mach_vm_offset_t addr, prot_addr;
6201 kern_return_t kr;
6202
6203 mach_vm_address_t user_stack = load_result->user_stack;
6204
6205 proc_lock(p);
6206 p->user_stack = (uintptr_t)user_stack;
6207 if (load_result->custom_stack) {
6208 p->p_lflag |= P_LCUSTOM_STACK;
6209 }
6210 proc_unlock(p);
6211 if (vm_map_page_shift(map) < (int)PAGE_SHIFT) {
6212 DEBUG4K_LOAD("map %p user_stack 0x%llx custom %d user_stack_alloc_size 0x%llx\n", map, user_stack, load_result->custom_stack, load_result->user_stack_alloc_size);
6213 }
6214
6215 if (load_result->user_stack_alloc_size > 0) {
6216 /*
6217 * Allocate enough space for the maximum stack size we
6218 * will ever authorize and an extra page to act as
6219 * a guard page for stack overflows. For default stacks,
6220 * vm_initial_limit_stack takes care of the extra guard page.
6221 * Otherwise we must allocate it ourselves.
6222 */
6223 if (mach_vm_round_page_overflow(load_result->user_stack_alloc_size, &size)) {
6224 return KERN_INVALID_ARGUMENT;
6225 }
6226 addr = vm_map_trunc_page(load_result->user_stack - size,
6227 vm_map_page_mask(map));
6228 kr = mach_vm_allocate_kernel(map, &addr, size,
6229 VM_FLAGS_FIXED, VM_MEMORY_STACK);
6230 if (kr != KERN_SUCCESS) {
6231 // Can't allocate at default location, try anywhere
6232 addr = 0;
6233 kr = mach_vm_allocate_kernel(map, &addr, size,
6234 VM_FLAGS_ANYWHERE, VM_MEMORY_STACK);
6235 if (kr != KERN_SUCCESS) {
6236 return kr;
6237 }
6238
6239 user_stack = addr + size;
6240 load_result->user_stack = (user_addr_t)user_stack;
6241
6242 proc_lock(p);
6243 p->user_stack = (uintptr_t)user_stack;
6244 proc_unlock(p);
6245 }
6246
6247 load_result->user_stack_alloc = (user_addr_t)addr;
6248
6249 /*
6250 * And prevent access to what's above the current stack
6251 * size limit for this process.
6252 */
6253 if (load_result->user_stack_size == 0) {
6254 load_result->user_stack_size = proc_limitgetcur(p, RLIMIT_STACK, TRUE);
6255 prot_size = vm_map_trunc_page(size - load_result->user_stack_size, vm_map_page_mask(map));
6256 } else {
6257 prot_size = PAGE_SIZE;
6258 }
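/*
 * Resulting layout (the stack grows down from user_stack); sketch:
 *
 *   addr            addr + prot_size                user_stack
 *    |-- protected --|---------- usable stack ----------|
 *      (VM_PROT_NONE guard/limit region)
 */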
6259
6260 prot_addr = addr;
6261 kr = mach_vm_protect(map,
6262 prot_addr,
6263 prot_size,
6264 FALSE,
6265 VM_PROT_NONE);
6266 if (kr != KERN_SUCCESS) {
6267 (void)mach_vm_deallocate(map, addr, size);
6268 return kr;
6269 }
6270 }
6271
6272 return KERN_SUCCESS;
6273}
6274
6275#include <sys/reboot.h>
6276
6277/*
6278 * load_init_program_at_path
6279 *
6280 * Description: Load the "init" program; in most cases, this will be "launchd"
6281 *
6282 * Parameters: p Process to call execve() to create
6283 * the "init" program
6284 * scratch_addr Page in p, scratch space
6285 * path NULL terminated path
6286 *
6287 * Returns: KERN_SUCCESS Success
6288 * !KERN_SUCCESS See execve/mac_execve for error codes
6289 *
6290 * Notes: The process that is passed in is the first manufactured
6291 * process on the system, and gets here via bsd_ast() firing
6292 * for the first time. This is done to ensure that bsd_init()
6293 * has run to completion.
6294 *
6295 * The address map of the first manufactured process matches the
6296 * word width of the kernel. Once the self-exec completes, the
6297 * initproc might be different.
6298 */
6299static int
6300load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path)
6301{
6302 int retval[2];
6303 int error;
6304 struct execve_args init_exec_args;
6305 user_addr_t argv0 = USER_ADDR_NULL, argv1 = USER_ADDR_NULL;
6306
6307 /*
6308 * Validate inputs and pre-conditions
6309 */
6310 assert(p);
6311 assert(scratch_addr);
6312 assert(path);
6313
6314 /*
6315 * Copy out program name.
6316 */
6317 size_t path_length = strlen(path) + 1;
6318 argv0 = scratch_addr;
6319 error = copyout(path, argv0, path_length);
6320 if (error) {
6321 return error;
6322 }
6323
6324 scratch_addr = USER_ADDR_ALIGN(scratch_addr + path_length, sizeof(user_addr_t));
6325
6326 /*
6327 * Put out first (and only) argument, similarly.
6328 * Assumes everything fits in a page as allocated above.
6329 */
6330 if (boothowto & RB_SINGLE) {
6331 const char *init_args = "-s";
6332 size_t init_args_length = strlen(init_args) + 1;
6333
6334 argv1 = scratch_addr;
6335 error = copyout(init_args, argv1, init_args_length);
6336 if (error) {
6337 return error;
6338 }
6339
6340 scratch_addr = USER_ADDR_ALIGN(scratch_addr + init_args_length, sizeof(user_addr_t));
6341 }
6342
6343 if (proc_is64bit(p)) {
6344 user64_addr_t argv64bit[3] = {};
6345
6346 argv64bit[0] = argv0;
6347 argv64bit[1] = argv1;
6348 argv64bit[2] = USER_ADDR_NULL;
6349
6350 error = copyout(argv64bit, scratch_addr, sizeof(argv64bit));
6351 if (error) {
6352 return error;
6353 }
6354 } else {
6355 user32_addr_t argv32bit[3] = {};
6356
6357 argv32bit[0] = (user32_addr_t)argv0;
6358 argv32bit[1] = (user32_addr_t)argv1;
6359 argv32bit[2] = USER_ADDR_NULL;
6360
6361 error = copyout(argv32bit, scratch_addr, sizeof(argv32bit));
6362 if (error) {
6363 return error;
6364 }
6365 }
6366
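/*
 * Scratch page layout at this point (single-user boot shown):
 *
 *   [ path\0 ][ "-s"\0 ][ align pad ][ argv0, argv1, NULL ]
 *     ^argv0    ^argv1                 ^scratch_addr
 */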
6367 /*
6368 * Set up argument block for fake call to execve.
6369 */
6370 init_exec_args.fname = argv0;
6371 init_exec_args.argp = scratch_addr;
6372 init_exec_args.envp = USER_ADDR_NULL;
6373
6374 /*
6375 * So that init task is set with uid,gid 0 token
6376 */
6377 set_security_token(p);
6378
6379 return execve(p, &init_exec_args, retval);
6380}
6381
6382static const char * init_programs[] = {
6383#if DEBUG
6384 "/usr/appleinternal/sbin/launchd.debug",
6385#endif
6386#if DEVELOPMENT || DEBUG
6387 "/usr/appleinternal/sbin/launchd.development",
6388#endif
6389 "/sbin/launchd",
6390};
6391
6392/*
6393 * load_init_program
6394 *
6395 * Description: Load the "init" program; in most cases, this will be "launchd"
6396 *
6397 * Parameters: p Process to call execve() to create
6398 * the "init" program
6399 *
6400 * Returns: (void)
6401 *
6402 * Notes: The process that is passed in is the first manufactured
6403 * process on the system, and gets here via bsd_ast() firing
6404 * for the first time. This is done to ensure that bsd_init()
6405 * has run to completion.
6406 *
6407 * In DEBUG & DEVELOPMENT builds, the launchdsuffix boot-arg
6408 * may be used to select a specific launchd executable. As with
6409 * the kcsuffix boot-arg, setting launchdsuffix to "" or "release"
6410 * will force /sbin/launchd to be selected.
6411 *
6412 * Search order by build:
6413 *
6414 * DEBUG DEVELOPMENT RELEASE PATH
6415 * ----------------------------------------------------------------------------------
6416 * 1 1 NA /usr/appleinternal/sbin/launchd.$LAUNCHDSUFFIX
6417 * 2 NA NA /usr/appleinternal/sbin/launchd.debug
6418 * 3 2 NA /usr/appleinternal/sbin/launchd.development
6419 * 4 3 1 /sbin/launchd
6420 */
6421void
6422load_init_program(proc_t p)
6423{
6424 uint32_t i;
6425 int error;
6426 vm_map_t map = current_map();
6427 mach_vm_offset_t scratch_addr = 0;
6428 mach_vm_size_t map_page_size = vm_map_page_size(map);
6429
6430 (void) mach_vm_allocate_kernel(map, &scratch_addr, map_page_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE);
6431#if CONFIG_MEMORYSTATUS
6432 (void) memorystatus_init_at_boot_snapshot();
6433#endif /* CONFIG_MEMORYSTATUS */
6434
6435#if __has_feature(ptrauth_calls)
6436 PE_parse_boot_argn("vm_shared_region_per_team_id", &vm_shared_region_per_team_id, sizeof(vm_shared_region_per_team_id));
6437 PE_parse_boot_argn("vm_shared_region_by_entitlement", &vm_shared_region_by_entitlement, sizeof(vm_shared_region_by_entitlement));
6438 PE_parse_boot_argn("vm_shared_region_reslide_aslr", &vm_shared_region_reslide_aslr, sizeof(vm_shared_region_reslide_aslr));
6439 PE_parse_boot_argn("vm_shared_region_reslide_restrict", &vm_shared_region_reslide_restrict, sizeof(vm_shared_region_reslide_restrict));
6440#endif /* __has_feature(ptrauth_calls) */
6441
6442#if DEBUG || DEVELOPMENT
6443#if XNU_TARGET_OS_OSX
6444 PE_parse_boot_argn("unentitled_ios_sim_launch", &unentitled_ios_sim_launch, sizeof(unentitled_ios_sim_launch));
6445#endif /* XNU_TARGET_OS_OSX */
6446
6447 /* Check for boot-arg suffix first */
6448 char launchd_suffix[64];
6449 if (PE_parse_boot_argn("launchdsuffix", launchd_suffix, sizeof(launchd_suffix))) {
6450 char launchd_path[128];
6451 boolean_t is_release_suffix = ((launchd_suffix[0] == 0) ||
6452 (strcmp(launchd_suffix, "release") == 0));
6453
6454 if (is_release_suffix) {
6455 printf("load_init_program: attempting to load /sbin/launchd\n");
6456 error = load_init_program_at_path(p, (user_addr_t)scratch_addr, "/sbin/launchd");
6457 if (!error) {
6458 return;
6459 }
6460
6461 panic("Process 1 exec of launchd.release failed, errno %d", error);
6462 } else {
6463 strlcpy(launchd_path, "/usr/appleinternal/sbin/launchd.", sizeof(launchd_path));
6464 strlcat(launchd_path, launchd_suffix, sizeof(launchd_path));
6465
6466 printf("load_init_program: attempting to load %s\n", launchd_path);
6467 error = load_init_program_at_path(p, (user_addr_t)scratch_addr, launchd_path);
6468 if (!error) {
6469 return;
6470 } else if (error != ENOENT) {
6471 printf("load_init_program: failed loading %s: errno %d\n", launchd_path, error);
6472 }
6473 }
6474 }
6475#endif
6476
6477 error = ENOENT;
6478 for (i = 0; i < sizeof(init_programs) / sizeof(init_programs[0]); i++) {
6479 printf("load_init_program: attempting to load %s\n", init_programs[i]);
6480 error = load_init_program_at_path(p, (user_addr_t)scratch_addr, init_programs[i]);
6481 if (!error) {
6482 return;
6483 } else if (error != ENOENT) {
6484 printf("load_init_program: failed loading %s: errno %d\n", init_programs[i], error);
6485 }
6486 }
6487
6488 panic("Process 1 exec of %s failed, errno %d", ((i == 0) ? "<null>" : init_programs[i - 1]), error);
6489}
6490
6491/*
6492 * load_return_to_errno
6493 *
6494 * Description: Convert a load_return_t (Mach error) to an errno (BSD error)
6495 *
6496 * Parameters: lrtn Mach error number
6497 *
6498 * Returns: (int) BSD error number
6499 * 0 Success
6500 * EBADARCH Bad architecture
6501 * EBADMACHO Bad Mach object file
6502 * ESHLIBVERS Bad shared library version
6503 * ENOMEM Out of memory/resource shortage
6504 * EACCES Access denied
6505 * ENOENT Entry not found (usually "file does
6506 * not exist")
6507 * EIO An I/O error occurred
6508 * EBADEXEC The executable is corrupt/unknown
6509 */
6510static int
6511load_return_to_errno(load_return_t lrtn)
6512{
6513 switch (lrtn) {
6514 case LOAD_SUCCESS:
6515 return 0;
6516 case LOAD_BADARCH:
6517 return EBADARCH;
6518 case LOAD_BADMACHO:
6519 case LOAD_BADMACHO_UPX:
6520 return EBADMACHO;
6521 case LOAD_SHLIB:
6522 return ESHLIBVERS;
6523 case LOAD_NOSPACE:
6524 case LOAD_RESOURCE:
6525 return ENOMEM;
6526 case LOAD_PROTECT:
6527 return EACCES;
6528 case LOAD_ENOENT:
6529 return ENOENT;
6530 case LOAD_IOERROR:
6531 return EIO;
6532 case LOAD_DECRYPTFAIL:
6533 return EAUTH;
6534 case LOAD_FAILURE:
6535 default:
6536 return EBADEXEC;
6537 }
6538}
6539
6540#include <mach/mach_types.h>
6541#include <mach/vm_prot.h>
6542#include <mach/semaphore.h>
6543#include <mach/sync_policy.h>
6544#include <kern/clock.h>
6545#include <mach/kern_return.h>
6546
6547/*
6548 * execargs_alloc
6549 *
6550 * Description: Allocate the block of memory used by the execve arguments.
6551 * At the same time, we allocate a page so that we can read in
6552 * the first page of the image.
6553 *
6554 * Parameters: struct image_params * the image parameter block
6555 *
6556 * Returns: 0 Success
6557 * EINVAL Invalid argument
6558 * EACCES Permission denied
6559 * EINTR Interrupted function
6560 * ENOMEM Not enough space
6561 *
6562 * Notes: This is a temporary allocation into the kernel address space
6563 * to enable us to copy arguments in from user space. This is
6564 * necessitated by not mapping the process calling execve() into
6565 * the kernel address space during the execve() system call.
6566 *
6567 * We assemble the argument and environment, etc., into this
6568 * region before copying it as a single block into the child
6569 * process address space (at the top or bottom of the stack,
6570 * depending on which way the stack grows; see the function
6571 * exec_copyout_strings() for details).
6572 *
6573 * This ends up with a second (possibly unnecessary) copy compared
6574 * with assembling the data directly into the child address space,
6575 * but since we cannot guarantee that the parent has not modified
6576 * its environment, we cannot assume the data is still laid out as
6577 * a contiguous block there.
6578 */
6579
6580
6581static int execargs_waiters = 0;
6582static LCK_MTX_DECLARE_ATTR(execargs_cache_lock, &proc_lck_grp, &proc_lck_attr);
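/*
 * Freed execargs buffers are parked in execargs_cache and marked
 * volatile, so the VM system may reclaim their pages under memory
 * pressure; execargs_alloc() makes a cached buffer nonvolatile
 * again before reuse, or allocates a fresh purgeable buffer when
 * the cache is empty.
 */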
6583
6584static void
6585execargs_lock_lock(void)
6586{
6587 lck_mtx_lock_spin(&execargs_cache_lock);
6588}
6589
6590static void
6591execargs_lock_unlock(void)
6592{
6593 lck_mtx_unlock(&execargs_cache_lock);
6594}
6595
6596static wait_result_t
6597execargs_lock_sleep(void)
6598{
6599 return lck_mtx_sleep(&execargs_cache_lock, LCK_SLEEP_DEFAULT, &execargs_free_count, THREAD_INTERRUPTIBLE);
6600}
6601
6602static kern_return_t
6603execargs_purgeable_allocate(char **execarg_address)
6604{
6605 kern_return_t kr = vm_allocate_kernel(bsd_pageable_map, (vm_offset_t *)execarg_address, BSD_PAGEABLE_SIZE_PER_EXEC, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE, VM_KERN_MEMORY_NONE);
6606 assert(kr == KERN_SUCCESS);
6607 return kr;
6608}
6609
6610static kern_return_t
6611execargs_purgeable_reference(void *execarg_address)
6612{
6613 int state = VM_PURGABLE_NONVOLATILE;
6614 kern_return_t kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);
6615
6616 assert(kr == KERN_SUCCESS);
6617 return kr;
6618}
6619
6620static kern_return_t
6621execargs_purgeable_volatilize(void *execarg_address)
6622{
6623 int state = VM_PURGABLE_VOLATILE | VM_PURGABLE_ORDERING_OBSOLETE;
6624 kern_return_t kr;
6625 kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);
6626
6627 assert(kr == KERN_SUCCESS);
6628
6629 return kr;
6630}
6631
6632static void
6633execargs_wakeup_waiters(void)
6634{
6635 thread_wakeup(&execargs_free_count);
6636}
6637
6638static int
6639execargs_alloc(struct image_params *imgp)
6640{
6641 kern_return_t kret;
6642 wait_result_t res;
6643 int i, cache_index = -1;
6644
6645 execargs_lock_lock();
6646
6647 while (execargs_free_count == 0) {
6648 execargs_waiters++;
6649 res = execargs_lock_sleep();
6650 execargs_waiters--;
6651 if (res != THREAD_AWAKENED) {
6652 execargs_lock_unlock();
6653 return EINTR;
6654 }
6655 }
6656
6657 execargs_free_count--;
6658
6659 for (i = 0; i < execargs_cache_size; i++) {
6660 vm_offset_t element = execargs_cache[i];
6661 if (element) {
6662 cache_index = i;
6663 imgp->ip_strings = (char *)(execargs_cache[i]);
6664 execargs_cache[i] = 0;
6665 break;
6666 }
6667 }
6668
6669 assert(execargs_free_count >= 0);
6670
6671 execargs_lock_unlock();
6672
6673 if (cache_index == -1) {
6674 kret = execargs_purgeable_allocate(&imgp->ip_strings);
6675 } else {
6676 kret = execargs_purgeable_reference(imgp->ip_strings);
6677 }
6678
6679 assert(kret == KERN_SUCCESS);
6680 if (kret != KERN_SUCCESS) {
6681 return ENOMEM;
6682 }
6683
6684 /* last page used to read in file headers */
6685 imgp->ip_vdata = imgp->ip_strings + (NCARGS + PAGE_SIZE);
6686 imgp->ip_strendp = imgp->ip_strings;
6687 imgp->ip_argspace = NCARGS;
6688 imgp->ip_strspace = (NCARGS + PAGE_SIZE);
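/*
 * Layout of the allocation:
 *   ip_strings: NCARGS + PAGE_SIZE bytes of argument/string space
 *   ip_vdata:   one page immediately after, for the image header
 */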
6689
6690 return 0;
6691}
6692
6693/*
6694 * execargs_free
6695 *
6696 * Description: Free the block of memory used by the execve arguments and the
6697 * first page of the executable by a previous call to the function
6698 * execargs_alloc().
6699 *
6700 * Parameters: struct image_params * the image parameter block
6701 *
6702 * Returns: 0 Success
6703 * EINVAL Invalid argument
6704 * EINTR Operation interrupted
6705 */
6706static int
6707execargs_free(struct image_params *imgp)
6708{
6709 kern_return_t kret;
6710 int i;
6711 boolean_t needs_wakeup = FALSE;
6712
6713 kret = execargs_purgeable_volatilize(imgp->ip_strings);
6714
6715 execargs_lock_lock();
6716 execargs_free_count++;
6717
6718 for (i = 0; i < execargs_cache_size; i++) {
6719 vm_offset_t element = execargs_cache[i];
6720 if (element == 0) {
6721 execargs_cache[i] = (vm_offset_t) imgp->ip_strings;
6722 imgp->ip_strings = NULL;
6723 break;
6724 }
6725 }
6726
6727 assert(imgp->ip_strings == NULL);
6728
6729 if (execargs_waiters > 0) {
6730 needs_wakeup = TRUE;
6731 }
6732
6733 execargs_lock_unlock();
6734
6735 if (needs_wakeup == TRUE) {
6736 execargs_wakeup_waiters();
6737 }
6738
6739 return kret == KERN_SUCCESS ? 0 : EINVAL;
6740}
6741
6742static void
6743exec_resettextvp(proc_t p, struct image_params *imgp)
6744{
6745 vnode_t vp;
6746 off_t offset;
6747 vnode_t tvp = p->p_textvp;
6748 int ret;
6749
6750 vp = imgp->ip_vp;
6751 offset = imgp->ip_arch_offset;
6752
6753 if (vp == NULLVP) {
6754 panic("exec_resettextvp: expected valid vp");
6755 }
6756
6757 ret = vnode_ref(vp);
6758 proc_lock(p);
6759 if (ret == 0) {
6760 p->p_textvp = vp;
6761 p->p_textoff = offset;
6762 } else {
6763 p->p_textvp = NULLVP; /* this is paranoia */
6764 p->p_textoff = 0;
6765 }
6766 proc_unlock(p);
6767
6768 if (tvp != NULLVP) {
6769 if (vnode_getwithref(tvp) == 0) {
6770 vnode_rele(tvp);
6771 vnode_put(tvp);
6772 }
6773 }
6774}
6775
6776// Includes the 0-byte (therefore "SIZE" instead of "LEN").
6777static const size_t CS_CDHASH_STRING_SIZE = CS_CDHASH_LEN * 2 + 1;
6778
6779static void
6780cdhash_to_string(char str[CS_CDHASH_STRING_SIZE], uint8_t const * const cdhash)
6781{
6782 static char const nibble[] = "0123456789abcdef";
6783
6784 /* Apparently still the safest way to get a hex representation
6785 * of binary data.
6786 * xnu's printf routines have %*D/%20D in theory, but "not really", see:
6787 * <rdar://problem/33328859> confusion around %*D/%nD in printf
6788 */
6789 for (int i = 0; i < CS_CDHASH_LEN; ++i) {
6790 str[i * 2] = nibble[(cdhash[i] & 0xf0) >> 4];
6791 str[i * 2 + 1] = nibble[cdhash[i] & 0x0f];
6792 }
6793 str[CS_CDHASH_STRING_SIZE - 1] = 0;
6794}
6795
6796/*
6797 * __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__
6798 *
6799 * Description: Waits for the userspace daemon to respond to the request
6800 * we made. Function declared non inline to be visible in
6801 * stackshots and spindumps as well as debugging.
6802 */
6803__attribute__((noinline)) int
6804__EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid)
6805{
6806 return find_code_signature(task_access_port, new_pid);
6807}
6808
6809static int
6810check_for_signature(proc_t p, struct image_params *imgp)
6811{
6812 mach_port_t port = IPC_PORT_NULL;
6813 kern_return_t kr = KERN_FAILURE;
6814 int error = EACCES;
6815 boolean_t unexpected_failure = FALSE;
6816 struct cs_blob *csb;
6817 boolean_t require_success = FALSE;
6818 int spawn = (imgp->ip_flags & IMGPF_SPAWN);
6819 int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
6820 os_reason_t signature_failure_reason = OS_REASON_NULL;
6821
6822 /*
6823 * Override inherited code signing flags with the
6824 * ones for the process that is being successfully
6825 * loaded
6826 */
6827 proc_lock(p);
6828 p->p_csflags = imgp->ip_csflags;
6829 proc_unlock(p);
6830
6831 /* Set the switch_protect flag on the map */
6832 if (p->p_csflags & (CS_HARD | CS_KILL)) {
6833 vm_map_switch_protect(get_task_map(p->task), TRUE);
6834 }
6835 /* set the cs_enforced flags in the map */
6836 if (p->p_csflags & CS_ENFORCEMENT) {
6837 vm_map_cs_enforcement_set(get_task_map(p->task), TRUE);
6838 } else {
6839 vm_map_cs_enforcement_set(get_task_map(p->task), FALSE);
6840 }
6841
6842 /*
6843 * Image activation may have failed due to policy. That is
6844 * unexpected, but if the security framework does not approve
6845 * of the exec, kill and return immediately.
6846 */
6847 if (imgp->ip_mac_return != 0) {
6848 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6849 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY, 0, 0);
6850 signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY);
6851 error = imgp->ip_mac_return;
6852 unexpected_failure = TRUE;
6853 goto done;
6854 }
6855
6856 if (imgp->ip_cs_error != OS_REASON_NULL) {
6857 signature_failure_reason = imgp->ip_cs_error;
6858 imgp->ip_cs_error = OS_REASON_NULL;
6859 error = EACCES;
6860 goto done;
6861 }
6862
6863#if XNU_TARGET_OS_OSX
6864 /* Check for platform passed in spawn attr if iOS binary is being spawned */
6865 if (proc_platform(p) == PLATFORM_IOS) {
6866 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
6867 if (psa == NULL || psa->psa_platform == 0) {
6868 boolean_t no_sandbox_entitled = FALSE;
6869#if DEBUG || DEVELOPMENT
6870 /*
6871 * Allow iOS binaries to spawn on internal systems
6872 * if the no-sandbox entitlement is present or the
6873 * unentitled_ios_sim_launch boot-arg is set to true
6874 */
6875 if (unentitled_ios_sim_launch) {
6876 no_sandbox_entitled = TRUE;
6877 } else {
6878 no_sandbox_entitled = IOVnodeHasEntitlement(imgp->ip_vp,
6879 (int64_t)imgp->ip_arch_offset, "com.apple.private.security.no-sandbox");
6880 }
6881#endif /* DEBUG || DEVELOPMENT */
6882 if (!no_sandbox_entitled) {
6883 signature_failure_reason = os_reason_create(OS_REASON_EXEC,
6884 EXEC_EXIT_REASON_WRONG_PLATFORM);
6885 error = EACCES;
6886 goto done;
6887 }
6888 printf("Allowing spawn of iOS binary %s since it has "
6889 "com.apple.private.security.no-sandbox entitlement or unentitled_ios_sim_launch "
6890 "boot-arg set to true\n", p->p_name);
6891 } else if (psa->psa_platform != PLATFORM_IOS) {
6892 /* Simulator binary spawned with wrong platform */
6893 signature_failure_reason = os_reason_create(OS_REASON_EXEC,
6894 EXEC_EXIT_REASON_WRONG_PLATFORM);
6895 error = EACCES;
6896 goto done;
6897 } else {
6898 printf("Allowing spawn of iOS binary %s since correct platform was passed in spawn\n",
6899 p->p_name);
6900 }
6901 }
6902#endif /* XNU_TARGET_OS_OSX */
6903
6904 /* If the code signature came through the image activation path, we skip the
6905 * taskgated / externally attached path. */
6906 if (imgp->ip_csflags & CS_SIGNED) {
6907 error = 0;
6908 goto done;
6909 }
6910
6911 /* The rest of the code is for signatures that either already have been externally
6912 * attached (likely, but not necessarily by a previous run through the taskgated
6913 * path), or that will now be attached by taskgated. */
6914
6915 kr = task_get_task_access_port(p->task, &port);
6916 if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) {
6917 error = 0;
6918 if (require_success) {
6919 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6920 p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT, 0, 0);
6921 signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT);
6922 error = EACCES;
6923 }
6924 goto done;
6925 }
6926
6927 /*
6928 * taskgated returns KERN_SUCCESS if it has completed its work
6929 * and the exec should continue, KERN_FAILURE if the exec should
6930 * fail, or it may error out with a different error code in the
6931 * event of a mig failure (e.g. the process was signalled during the
6932 * rpc call, taskgated died, mig server died etc.).
6933 */
6934
6935 kr = __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(port, p->p_pid);
6936 switch (kr) {
6937 case KERN_SUCCESS:
6938 error = 0;
6939 break;
6940 case KERN_FAILURE:
6941 error = EACCES;
6942
6943 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6944 p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG, 0, 0);
6945 signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG);
6946 goto done;
6947 default:
6948 error = EACCES;
6949
6950 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
6951 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER, 0, 0);
6952 signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER);
6953 unexpected_failure = TRUE;
6954 goto done;
6955 }
6956
6957 /* Only do this if exec_resettextvp() did not fail */
6958 if (p->p_textvp != NULLVP) {
6959 csb = ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff);
6960
6961 if (csb != NULL) {
6962 /* As the enforcement we can do here is very limited, we only allow things that
6963 * are the only reason why this code path still exists:
6964 * Adhoc signed non-platform binaries without special cs_flags and without any
6965 * entitlements (unrestricted ones still pass AMFI). */
6966 if (
6967 /* Revalidate the blob if necessary through bumped generation count. */
6968 (ubc_cs_generation_check(p->p_textvp) == 0 ||
6969 ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0, proc_platform(p)) == 0) &&
6970 /* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */
6971 (csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC &&
6972 /* If it has a CMS blob, it's not adhoc. The CS_ADHOC flag can lie. */
6973 csblob_find_blob_bytes((const uint8_t *)csb->csb_mem_kaddr, csb->csb_mem_size,
6974 CSSLOT_SIGNATURESLOT,
6975 CSMAGIC_BLOBWRAPPER) == NULL &&
6976 /* It could still be in a trust cache (unlikely with CS_ADHOC), or a magic path. */
6977 csb->csb_platform_binary == 0 &&
6978 /* No entitlements, not even unrestricted ones. */
6979 csb->csb_entitlements_blob == NULL) {
6980 proc_lock(p);
6981 p->p_csflags |= CS_SIGNED | CS_VALID;
6982 proc_unlock(p);
6983 } else {
6984 uint8_t cdhash[CS_CDHASH_LEN];
6985 char cdhash_string[CS_CDHASH_STRING_SIZE];
6986 proc_getcdhash(p, cdhash);
6987 cdhash_to_string(cdhash_string, cdhash);
6988 printf("ignoring detached code signature on '%s' with cdhash '%s' "
6989 "because it is invalid, or not a simple adhoc signature.\n",
6990 p->p_name, cdhash_string);
6991 }
6992 }
6993 }
6994
6995done:
6996 if (0 == error) {
6997 /* The process's code signature related properties are
6998 * fully set up, so this is an opportune moment to log
6999 * platform binary execution, if desired. */
7000 if (platform_exec_logging != 0 && csproc_get_platform_binary(p)) {
7001 uint8_t cdhash[CS_CDHASH_LEN];
7002 char cdhash_string[CS_CDHASH_STRING_SIZE];
7003 proc_getcdhash(p, cdhash);
7004 cdhash_to_string(cdhash_string, cdhash);
7005
7006 os_log(peLog, "CS Platform Exec Logging: Executing platform signed binary "
7007 "'%s' with cdhash %s\n", p->p_name, cdhash_string);
7008 }
7009 } else {
7010 if (!unexpected_failure) {
7011 p->p_csflags |= CS_KILLED;
7012 }
7013 /* make very sure execution fails */
7014 if (vfexec || spawn) {
7015 assert(signature_failure_reason != OS_REASON_NULL);
7016 psignal_vfork_with_reason(p, p->task, imgp->ip_new_thread,
7017 SIGKILL, signature_failure_reason);
7018 signature_failure_reason = OS_REASON_NULL;
7019 error = 0;
7020 } else {
7021 assert(signature_failure_reason != OS_REASON_NULL);
7022 psignal_with_reason(p, SIGKILL, signature_failure_reason);
7023 signature_failure_reason = OS_REASON_NULL;
7024 }
7025 }
7026
7027 if (port != IPC_PORT_NULL) {
7028 ipc_port_release_send(port);
7029 }
7030
7031 /* If we hit this, we likely would have leaked an exit reason */
7032 assert(signature_failure_reason == OS_REASON_NULL);
7033 return error;
7034}
7035
7036/*
7037 * Typically as soon as we start executing this process, the
7038 * first instruction will trigger a VM fault to bring the text
7039 * pages (as executable) into the address space, followed soon
7040 * thereafter by dyld data structures (for dynamic executable).
7041 * To optimize this, as well as improve support for hardware
7042 * debuggers that can only access resident pages present
7043 * in the process' page tables, we prefault some pages if
7044 * possible. Errors are non-fatal.
7045 */
7046#ifndef PREVENT_CALLER_STACK_USE
7047#define PREVENT_CALLER_STACK_USE __attribute__((noinline))
7048#endif
7049static void PREVENT_CALLER_STACK_USE
7050exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result)
7051{
7052 int ret;
7053 size_t expected_all_image_infos_size;
7054 kern_return_t kr;
7055
7056 /*
7057 * Prefault executable or dyld entry point.
7058 */
7059 if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
7060 DEBUG4K_LOAD("entry_point 0x%llx\n", (uint64_t)load_result->entry_point);
7061 }
7062 kr = vm_fault(current_map(),
7063 vm_map_trunc_page(load_result->entry_point,
7064 vm_map_page_mask(current_map())),
7065 VM_PROT_READ | VM_PROT_EXECUTE,
7066 FALSE, VM_KERN_MEMORY_NONE,
7067 THREAD_UNINT, NULL, 0);
7068 if (kr != KERN_SUCCESS) {
7069 DEBUG4K_ERROR("map %p va 0x%llx -> 0x%x\n", current_map(), (uint64_t)vm_map_trunc_page(load_result->entry_point, vm_map_page_mask(current_map())), kr);
7070 }
7071
7072 if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) {
7073 expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos);
7074 } else {
7075 expected_all_image_infos_size = sizeof(struct user32_dyld_all_image_infos);
7076 }
7077
7078 /* Decode dyld anchor structure from <mach-o/dyld_images.h> */
7079 if (load_result->dynlinker &&
7080 load_result->all_image_info_addr &&
7081 load_result->all_image_info_size >= expected_all_image_infos_size) {
7082 union {
7083 struct user64_dyld_all_image_infos infos64;
7084 struct user32_dyld_all_image_infos infos32;
7085 } all_image_infos;
7086
7087 /*
7088 * Pre-fault to avoid copyin() going through the trap handler
7089 * and recovery path.
7090 */
7091 if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
7092 DEBUG4K_LOAD("all_image_info_addr 0x%llx\n", load_result->all_image_info_addr);
7093 }
7094 kr = vm_fault(current_map(),
7095 vm_map_trunc_page(load_result->all_image_info_addr,
7096 vm_map_page_mask(current_map())),
7097 VM_PROT_READ | VM_PROT_WRITE,
7098 FALSE, VM_KERN_MEMORY_NONE,
7099 THREAD_UNINT, NULL, 0);
7100 if (kr != KERN_SUCCESS) {
7101// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(load_result->all_image_info_addr, vm_map_page_mask(current_map())), kr);
7102 }
7103 if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) {
7104 /* all_image_infos straddles a page */
7105 kr = vm_fault(current_map(),
7106 vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1,
7107 vm_map_page_mask(current_map())),
7108 VM_PROT_READ | VM_PROT_WRITE,
7109 FALSE, VM_KERN_MEMORY_NONE,
7110 THREAD_UNINT, NULL, 0);
7111 if (kr != KERN_SUCCESS) {
7112// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size -1, vm_map_page_mask(current_map())), kr);
7113 }
7114 }
7115
7116 if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
7117 DEBUG4K_LOAD("copyin(0x%llx, 0x%lx)\n", load_result->all_image_info_addr, expected_all_image_infos_size);
7118 }
7119 ret = copyin((user_addr_t)load_result->all_image_info_addr,
7120 &all_image_infos,
7121 expected_all_image_infos_size);
7122 if (ret == 0 && all_image_infos.infos32.version >= DYLD_ALL_IMAGE_INFOS_ADDRESS_MINIMUM_VERSION) {
7123 user_addr_t notification_address;
7124 user_addr_t dyld_image_address;
7125 user_addr_t dyld_version_address;
7126 user_addr_t dyld_all_image_infos_address;
7127 user_addr_t dyld_slide_amount;
7128
7129 if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) {
7130 notification_address = (user_addr_t)all_image_infos.infos64.notification;
7131 dyld_image_address = (user_addr_t)all_image_infos.infos64.dyldImageLoadAddress;
7132 dyld_version_address = (user_addr_t)all_image_infos.infos64.dyldVersion;
7133 dyld_all_image_infos_address = (user_addr_t)all_image_infos.infos64.dyldAllImageInfosAddress;
7134 } else {
7135 notification_address = all_image_infos.infos32.notification;
7136 dyld_image_address = all_image_infos.infos32.dyldImageLoadAddress;
7137 dyld_version_address = all_image_infos.infos32.dyldVersion;
7138 dyld_all_image_infos_address = all_image_infos.infos32.dyldAllImageInfosAddress;
7139 }
7140
7141 /*
7142 * dyld statically sets up the all_image_infos in its Mach-O
7143 * binary at static link time, with pointers relative to its default
7144 * load address. Since ASLR might slide dyld before its first
7145 * instruction is executed, "dyld_slide_amount" tells us how far
7146 * dyld was loaded compared to its default expected load address.
7147 * All other pointers into dyld's image should be adjusted by this
7148 * amount. At some point later, dyld will fix up pointers to take
7149 * into account the slide, at which point the all_image_infos_address
7150 * field in the structure will match the runtime load address, and
7151 * "dyld_slide_amount" will be 0, if we were to consult it again.
7152 */
7153
7154 dyld_slide_amount = (user_addr_t)load_result->all_image_info_addr - dyld_all_image_infos_address;
7155
7156#if 0
7157 kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
7158 (uint64_t)load_result->all_image_info_addr,
7159 all_image_infos.infos32.version,
7160 (uint64_t)notification_address,
7161 (uint64_t)dyld_image_address,
7162 (uint64_t)dyld_version_address,
7163 (uint64_t)dyld_all_image_infos_address);
7164#endif
7165
7166 if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
7167 DEBUG4K_LOAD("notification_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)notification_address, (uint64_t)dyld_slide_amount);
7168 }
7169 kr = vm_fault(current_map(),
7170 vm_map_trunc_page(notification_address + dyld_slide_amount,
7171 vm_map_page_mask(current_map())),
7172 VM_PROT_READ | VM_PROT_EXECUTE,
7173 FALSE, VM_KERN_MEMORY_NONE,
7174 THREAD_UNINT, NULL, 0);
7175 if (kr != KERN_SUCCESS) {
7176// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(notification_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
7177 }
7178 if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
7179 DEBUG4K_LOAD("dyld_image_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_image_address, (uint64_t)dyld_slide_amount);
7180 }
7181 kr = vm_fault(current_map(),
7182 vm_map_trunc_page(dyld_image_address + dyld_slide_amount,
7183 vm_map_page_mask(current_map())),
7184 VM_PROT_READ | VM_PROT_EXECUTE,
7185 FALSE, VM_KERN_MEMORY_NONE,
7186 THREAD_UNINT, NULL, 0);
7187 if (kr != KERN_SUCCESS) {
7188// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_image_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
7189 }
7190 if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
7191 DEBUG4K_LOAD("dyld_version_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_version_address, (uint64_t)dyld_slide_amount);
7192 }
7193 kr = vm_fault(current_map(),
7194 vm_map_trunc_page(dyld_version_address + dyld_slide_amount,
7195 vm_map_page_mask(current_map())),
7196 VM_PROT_READ,
7197 FALSE, VM_KERN_MEMORY_NONE,
7198 THREAD_UNINT, NULL, 0);
7199 if (kr != KERN_SUCCESS) {
7200// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_version_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
7201 }
7202 if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
7203 DEBUG4K_LOAD("dyld_all_image_infos_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_version_address, (uint64_t)dyld_slide_amount);
7204 }
7205 kr = vm_fault(current_map(),
7206 vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount,
7207 vm_map_page_mask(current_map())),
7208 VM_PROT_READ | VM_PROT_WRITE,
7209 FALSE, VM_KERN_MEMORY_NONE,
7210 THREAD_UNINT, NULL, 0);
7211 if (kr != KERN_SUCCESS) {
7212// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
7213 }
7214 }
7215 }
7216}