2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Mach Operating System
31 * Copyright (c) 1987 Carnegie-Mellon University
32 * All rights reserved. The CMU software License Agreement specifies
33 * the terms and conditions for use and redistribution.
37 * Copyright (c) 1982, 1986, 1991, 1993
38 * The Regents of the University of California. All rights reserved.
39 * (c) UNIX System Laboratories, Inc.
40 * All or some portions of this file are derived from material licensed
41 * to the University of California by American Telephone and Telegraph
42 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
43 * the permission of UNIX System Laboratories, Inc.
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
48 * 1. Redistributions of source code must retain the above copyright
49 * notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 * 3. All advertising materials mentioning features or use of this software
54 * must display the following acknowledgement:
55 * This product includes software developed by the University of
56 * California, Berkeley and its contributors.
57 * 4. Neither the name of the University nor the names of its contributors
58 * may be used to endorse or promote products derived from this software
59 * without specific prior written permission.
61 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * from: @(#)kern_exec.c 8.1 (Berkeley) 6/10/93
76 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
77 * support for mandatory and extensible security protections. This notice
78 * is included in support of clause 2.2 (b) of the Apple Public License,
81 #include <machine/reg.h>
82 #include <machine/cpu_capabilities.h>
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/filedesc.h>
87 #include <sys/kernel.h>
88 #include <sys/proc_internal.h>
89 #include <sys/kauth.h>
91 #include <sys/socketvar.h>
92 #include <sys/malloc.h>
93 #include <sys/namei.h>
94 #include <sys/mount_internal.h>
95 #include <sys/vnode_internal.h>
96 #include <sys/file_internal.h>
98 #include <sys/uio_internal.h>
100 #include <sys/exec.h>
101 #include <sys/kdebug.h>
102 #include <sys/signal.h>
103 #include <sys/aio_kern.h>
104 #include <sys/sysproto.h>
105 #include <sys/persona.h>
106 #include <sys/reason.h>
108 #include <sys/shm_internal.h> /* shmexec() */
110 #include <sys/ubc_internal.h> /* ubc_map() */
111 #include <sys/spawn.h>
112 #include <sys/spawn_internal.h>
113 #include <sys/process_policy.h>
114 #include <sys/codesign.h>
115 #include <sys/random.h>
116 #include <crypto/sha1.h>
118 #include <libkern/libkern.h>
120 #include <security/audit/audit.h>
122 #include <ipc/ipc_types.h>
124 #include <mach/mach_types.h>
125 #include <mach/port.h>
126 #include <mach/task.h>
127 #include <mach/task_access.h>
128 #include <mach/thread_act.h>
129 #include <mach/vm_map.h>
130 #include <mach/mach_vm.h>
131 #include <mach/vm_param.h>
133 #include <kern/sched_prim.h> /* thread_wakeup() */
134 #include <kern/affinity.h>
135 #include <kern/assert.h>
136 #include <kern/task.h>
137 #include <kern/coalition.h>
138 #include <kern/policy_internal.h>
139 #include <kern/kalloc.h>
144 #include <security/mac_framework.h>
145 #include <security/mac_mach_internal.h>
148 #include <vm/vm_map.h>
149 #include <vm/vm_kern.h>
150 #include <vm/vm_protos.h>
151 #include <vm/vm_kern.h>
152 #include <vm/vm_fault.h>
153 #include <vm/vm_pageout.h>
155 #include <kdp/kdp_dyld.h>
157 #include <machine/pal_routines.h>
159 #include <pexpert/pexpert.h>
161 #if CONFIG_MEMORYSTATUS
162 #include <sys/kern_memorystatus.h>
166 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
167 extern void dtrace_proc_exec(proc_t
);
168 extern void (*dtrace_proc_waitfor_exec_ptr
)(proc_t
);
171 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
172 * we will store its value before actually calling it.
174 static void (*dtrace_proc_waitfor_hook
)(proc_t
) = NULL
;
176 #include <sys/dtrace_ptss.h>
179 /* support for child creation in exec after vfork */
180 thread_t
fork_create_child(task_t parent_task
, coalition_t
*parent_coalition
, proc_t child_proc
, int inherit_memory
, int is64bit
, int in_exec
);
181 void vfork_exit(proc_t p
, int rv
);
182 extern void proc_apply_task_networkbg_internal(proc_t
, thread_t
);
183 extern void task_set_did_exec_flag(task_t task
);
184 extern void task_clear_exec_copy_flag(task_t task
);
185 proc_t
proc_exec_switch_task(proc_t p
, task_t old_task
, task_t new_task
, thread_t new_thread
);
186 boolean_t
task_is_active(task_t
);
187 boolean_t
thread_is_active(thread_t thread
);
188 void thread_copy_resource_info(thread_t dst_thread
, thread_t src_thread
);
189 void *ipc_importance_exec_switch_task(task_t old_task
, task_t new_task
);
190 extern void ipc_importance_release(void *elem
);
193 * Mach things for which prototypes are unavailable from Mach headers
197 void ipc_thread_reset(
199 kern_return_t
ipc_object_copyin(
201 mach_port_name_t name
,
202 mach_msg_type_name_t msgt_name
,
203 ipc_object_t
*objectp
);
204 void ipc_port_release_send(ipc_port_t
);
206 #if DEVELOPMENT || DEBUG
207 void task_importance_update_owner_info(task_t
);
210 extern struct savearea
*get_user_regs(thread_t
);
212 __attribute__((noinline
)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port
, int32_t new_pid
);
214 #include <kern/thread.h>
215 #include <kern/task.h>
216 #include <kern/ast.h>
217 #include <kern/mach_loader.h>
218 #include <kern/mach_fat.h>
219 #include <mach-o/fat.h>
220 #include <mach-o/loader.h>
221 #include <machine/vmparam.h>
222 #include <sys/imgact.h>
228 * EAI_ITERLIMIT The maximum number of times to iterate an image
229 * activator in exec_activate_image() before treating
230 * it as malformed/corrupt.
232 #define EAI_ITERLIMIT 3
235 * For #! interpreter parsing
237 #define IS_WHITESPACE(ch) ((ch == ' ') || (ch == '\t'))
238 #define IS_EOL(ch) ((ch == '#') || (ch == '\n'))
240 extern vm_map_t bsd_pageable_map
;
241 extern const struct fileops vnops
;
243 #define USER_ADDR_ALIGN(addr, val) \
244 ( ( (user_addr_t)(addr) + (val) - 1) \
247 /* Platform Code Exec Logging */
248 static int platform_exec_logging
= 0;
250 SYSCTL_DECL(_security_mac
);
252 SYSCTL_INT(_security_mac
, OID_AUTO
, platform_exec_logging
, CTLFLAG_RW
, &platform_exec_logging
, 0,
253 "log cdhashes for all platform binary executions");
255 static os_log_t peLog
= OS_LOG_DEFAULT
;
257 struct image_params
; /* Forward */
258 static int exec_activate_image(struct image_params
*imgp
);
259 static int exec_copyout_strings(struct image_params
*imgp
, user_addr_t
*stackp
);
260 static int load_return_to_errno(load_return_t lrtn
);
261 static int execargs_alloc(struct image_params
*imgp
);
262 static int execargs_free(struct image_params
*imgp
);
263 static int exec_check_permissions(struct image_params
*imgp
);
264 static int exec_extract_strings(struct image_params
*imgp
);
265 static int exec_add_apple_strings(struct image_params
*imgp
, const load_result_t
*load_result
);
266 static int exec_handle_sugid(struct image_params
*imgp
);
267 static int sugid_scripts
= 0;
268 SYSCTL_INT (_kern
, OID_AUTO
, sugid_scripts
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &sugid_scripts
, 0, "");
269 static kern_return_t
create_unix_stack(vm_map_t map
, load_result_t
* load_result
, proc_t p
);
270 static int copyoutptr(user_addr_t ua
, user_addr_t ptr
, int ptr_size
);
271 static void exec_resettextvp(proc_t
, struct image_params
*);
272 static int check_for_signature(proc_t
, struct image_params
*);
273 static void exec_prefault_data(proc_t
, struct image_params
*, load_result_t
*);
274 static errno_t
exec_handle_port_actions(struct image_params
*imgp
, boolean_t
* portwatch_present
, ipc_port_t
* portwatch_ports
);
275 static errno_t
exec_handle_spawnattr_policy(proc_t p
, int psa_apptype
, uint64_t psa_qos_clamp
, uint64_t psa_darwin_role
,
276 ipc_port_t
* portwatch_ports
, int portwatch_count
);
279 * exec_add_user_string
281 * Add the requested string to the string space area.
283 * Parameters; struct image_params * image parameter block
284 * user_addr_t string to add to strings area
285 * int segment from which string comes
286 * boolean_t TRUE if string contributes to NCARGS
289 * !0 Failure errno from copyinstr()
292 * (imgp->ip_strendp) updated location of next add, if any
293 * (imgp->ip_strspace) updated byte count of space remaining
294 * (imgp->ip_argspace) updated byte count of space in NCARGS
297 exec_add_user_string(struct image_params
*imgp
, user_addr_t str
, int seg
, boolean_t is_ncargs
)
306 space
= imgp
->ip_argspace
; /* by definition smaller than ip_strspace */
308 space
= imgp
->ip_strspace
;
315 if (!UIO_SEG_IS_USER_SPACE(seg
)) {
316 char *kstr
= CAST_DOWN(char *,str
); /* SAFE */
317 error
= copystr(kstr
, imgp
->ip_strendp
, space
, &len
);
319 error
= copyinstr(str
, imgp
->ip_strendp
, space
, &len
);
322 imgp
->ip_strendp
+= len
;
323 imgp
->ip_strspace
-= len
;
325 imgp
->ip_argspace
-= len
;
327 } while (error
== ENAMETOOLONG
);
333 * dyld is now passed the executable path as a getenv-like variable
334 * in the same fashion as the stack_guard and malloc_entropy keys.
336 #define EXECUTABLE_KEY "executable_path="
341 * To support new app package launching for Mac OS X, the dyld needs the
342 * first argument to execve() stored on the user stack.
344 * Save the executable path name at the bottom of the strings area and set
345 * the argument vector pointer to the location following that to indicate
346 * the start of the argument and environment tuples, setting the remaining
347 * string space count to the size of the string area minus the path length.
349 * Parameters; struct image_params * image parameter block
350 * char * path used to invoke program
351 * int segment from which path comes
353 * Returns: int 0 Success
355 * copy[in]str:EFAULT Bad address
356 * copy[in]str:ENAMETOOLONG Filename too long
359 * (imgp->ip_strings) saved path
360 * (imgp->ip_strspace) space remaining in ip_strings
361 * (imgp->ip_strendp) start of remaining copy area
362 * (imgp->ip_argspace) space remaining of NCARGS
363 * (imgp->ip_applec) Initial applev[0]
365 * Note: We have to do this before the initial namei() since in the
366 * path contains symbolic links, namei() will overwrite the
367 * original path buffer contents. If the last symbolic link
368 * resolved was a relative pathname, we would lose the original
369 * "path", which could be an absolute pathname. This might be
370 * unacceptable for dyld.
373 exec_save_path(struct image_params
*imgp
, user_addr_t path
, int seg
, const char **excpath
)
379 // imgp->ip_strings can come out of a cache, so we need to obliterate the
381 memset(imgp
->ip_strings
, '\0', strlen(EXECUTABLE_KEY
) + MAXPATHLEN
);
383 len
= MIN(MAXPATHLEN
, imgp
->ip_strspace
);
386 case UIO_USERSPACE32
:
387 case UIO_USERSPACE64
: /* Same for copyin()... */
388 error
= copyinstr(path
, imgp
->ip_strings
+ strlen(EXECUTABLE_KEY
), len
, &len
);
391 kpath
= CAST_DOWN(char *,path
); /* SAFE */
392 error
= copystr(kpath
, imgp
->ip_strings
+ strlen(EXECUTABLE_KEY
), len
, &len
);
400 bcopy(EXECUTABLE_KEY
, imgp
->ip_strings
, strlen(EXECUTABLE_KEY
));
401 len
+= strlen(EXECUTABLE_KEY
);
403 imgp
->ip_strendp
+= len
;
404 imgp
->ip_strspace
-= len
;
407 *excpath
= imgp
->ip_strings
+ strlen(EXECUTABLE_KEY
);
415 * exec_reset_save_path
417 * If we detect a shell script, we need to reset the string area
418 * state so that the interpreter can be saved onto the stack.
420 * Parameters; struct image_params * image parameter block
422 * Returns: int 0 Success
425 * (imgp->ip_strings) saved path
426 * (imgp->ip_strspace) space remaining in ip_strings
427 * (imgp->ip_strendp) start of remaining copy area
428 * (imgp->ip_argspace) space remaining of NCARGS
432 exec_reset_save_path(struct image_params
*imgp
)
434 imgp
->ip_strendp
= imgp
->ip_strings
;
435 imgp
->ip_argspace
= NCARGS
;
436 imgp
->ip_strspace
= ( NCARGS
+ PAGE_SIZE
);
444 * Image activator for interpreter scripts. If the image begins with
445 * the characters "#!", then it is an interpreter script. Verify the
446 * length of the script line indicating the interpreter is not in
447 * excess of the maximum allowed size. If this is the case, then
448 * break out the arguments, if any, which are separated by white
449 * space, and copy them into the argument save area as if they were
450 * provided on the command line before all other arguments. The line
451 * ends when we encounter a comment character ('#') or newline.
453 * Parameters; struct image_params * image parameter block
455 * Returns: -1 not an interpreter (keep looking)
456 * -3 Success: interpreter: relookup
457 * >0 Failure: interpreter: error number
459 * A return value other than -1 indicates subsequent image activators should
460 * not be given the opportunity to attempt to activate the image.
463 exec_shell_imgact(struct image_params
*imgp
)
465 char *vdata
= imgp
->ip_vdata
;
467 char *line_startp
, *line_endp
;
471 * Make sure it's a shell script. If we've already redirected
472 * from an interpreted file once, don't do it again.
474 if (vdata
[0] != '#' ||
476 (imgp
->ip_flags
& IMGPF_INTERPRET
) != 0) {
480 if (imgp
->ip_origcputype
!= 0) {
481 /* Fat header previously matched, don't allow shell script inside */
485 imgp
->ip_flags
|= IMGPF_INTERPRET
;
486 imgp
->ip_interp_sugid_fd
= -1;
487 imgp
->ip_interp_buffer
[0] = '\0';
489 /* Check to see if SUGID scripts are permitted. If they aren't then
490 * clear the SUGID bits.
491 * imgp->ip_vattr is known to be valid.
493 if (sugid_scripts
== 0) {
494 imgp
->ip_origvattr
->va_mode
&= ~(VSUID
| VSGID
);
497 /* Try to find the first non-whitespace character */
498 for( ihp
= &vdata
[2]; ihp
< &vdata
[IMG_SHSIZE
]; ihp
++ ) {
500 /* Did not find interpreter, "#!\n" */
502 } else if (IS_WHITESPACE(*ihp
)) {
503 /* Whitespace, like "#! /bin/sh\n", keep going. */
505 /* Found start of interpreter */
510 if (ihp
== &vdata
[IMG_SHSIZE
]) {
511 /* All whitespace, like "#! " */
517 /* Try to find the end of the interpreter+args string */
518 for ( ; ihp
< &vdata
[IMG_SHSIZE
]; ihp
++ ) {
523 /* Still part of interpreter or args */
527 if (ihp
== &vdata
[IMG_SHSIZE
]) {
528 /* A long line, like "#! blah blah blah" without end */
532 /* Backtrack until we find the last non-whitespace */
533 while (IS_EOL(*ihp
) || IS_WHITESPACE(*ihp
)) {
537 /* The character after the last non-whitespace is our logical end of line */
541 * Now we have pointers to the usable part of:
543 * "#! /usr/bin/int first second third \n"
544 * ^ line_startp ^ line_endp
547 /* copy the interpreter name */
548 interp
= imgp
->ip_interp_buffer
;
549 for ( ihp
= line_startp
; (ihp
< line_endp
) && !IS_WHITESPACE(*ihp
); ihp
++)
553 exec_reset_save_path(imgp
);
554 exec_save_path(imgp
, CAST_USER_ADDR_T(imgp
->ip_interp_buffer
),
557 /* Copy the entire interpreter + args for later processing into argv[] */
558 interp
= imgp
->ip_interp_buffer
;
559 for ( ihp
= line_startp
; (ihp
< line_endp
); ihp
++)
565 * If we have an SUID or SGID script, create a file descriptor
566 * from the vnode and pass /dev/fd/%d instead of the actual
567 * path name so that the script does not get opened twice
569 if (imgp
->ip_origvattr
->va_mode
& (VSUID
| VSGID
)) {
575 p
= vfs_context_proc(imgp
->ip_vfs_context
);
576 error
= falloc(p
, &fp
, &fd
, imgp
->ip_vfs_context
);
580 fp
->f_fglob
->fg_flag
= FREAD
;
581 fp
->f_fglob
->fg_ops
= &vnops
;
582 fp
->f_fglob
->fg_data
= (caddr_t
)imgp
->ip_vp
;
585 procfdtbl_releasefd(p
, fd
, NULL
);
586 fp_drop(p
, fd
, fp
, 1);
588 vnode_ref(imgp
->ip_vp
);
590 imgp
->ip_interp_sugid_fd
= fd
;
602 * Image activator for fat 1.0 binaries. If the binary is fat, then we
603 * need to select an image from it internally, and make that the image
604 * we are going to attempt to execute. At present, this consists of
605 * reloading the first page for the image with a first page from the
606 * offset location indicated by the fat header.
608 * Parameters; struct image_params * image parameter block
610 * Returns: -1 not a fat binary (keep looking)
611 * -2 Success: encapsulated binary: reread
612 * >0 Failure: error number
614 * Important: This image activator is byte order neutral.
616 * Note: A return value other than -1 indicates subsequent image
617 * activators should not be given the opportunity to attempt
618 * to activate the image.
620 * If we find an encapsulated binary, we make no assertions
621 * about its validity; instead, we leave that up to a rescan
622 * for an activator to claim it, and, if it is claimed by one,
623 * that activator is responsible for determining validity.
626 exec_fat_imgact(struct image_params
*imgp
)
628 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
629 kauth_cred_t cred
= kauth_cred_proc_ref(p
);
630 struct fat_header
*fat_header
= (struct fat_header
*)imgp
->ip_vdata
;
631 struct _posix_spawnattr
*psa
= NULL
;
632 struct fat_arch fat_arch
;
636 if (imgp
->ip_origcputype
!= 0) {
637 /* Fat header previously matched, don't allow another fat file inside */
638 error
= -1; /* not claimed */
642 /* Make sure it's a fat binary */
643 if (OSSwapBigToHostInt32(fat_header
->magic
) != FAT_MAGIC
) {
644 error
= -1; /* not claimed */
648 /* imgp->ip_vdata has PAGE_SIZE, zerofilled if the file is smaller */
649 lret
= fatfile_validate_fatarches((vm_offset_t
)fat_header
, PAGE_SIZE
);
650 if (lret
!= LOAD_SUCCESS
) {
651 error
= load_return_to_errno(lret
);
655 /* If posix_spawn binprefs exist, respect those prefs. */
656 psa
= (struct _posix_spawnattr
*) imgp
->ip_px_sa
;
657 if (psa
!= NULL
&& psa
->psa_binprefs
[0] != 0) {
660 /* Check each preference listed against all arches in header */
661 for (pr
= 0; pr
< NBINPREFS
; pr
++) {
662 cpu_type_t pref
= psa
->psa_binprefs
[pr
];
664 /* No suitable arch in the pref list */
669 if (pref
== CPU_TYPE_ANY
) {
670 /* Fall through to regular grading */
671 goto regular_grading
;
674 lret
= fatfile_getbestarch_for_cputype(pref
,
675 (vm_offset_t
)fat_header
,
678 if (lret
== LOAD_SUCCESS
) {
683 /* Requested binary preference was not honored */
689 /* Look up our preferred architecture in the fat file. */
690 lret
= fatfile_getbestarch((vm_offset_t
)fat_header
,
693 if (lret
!= LOAD_SUCCESS
) {
694 error
= load_return_to_errno(lret
);
699 /* Read the Mach-O header out of fat_arch */
700 error
= vn_rdwr(UIO_READ
, imgp
->ip_vp
, imgp
->ip_vdata
,
701 PAGE_SIZE
, fat_arch
.offset
,
702 UIO_SYSSPACE
, (IO_UNIT
|IO_NODELOCKED
),
709 memset(imgp
->ip_vdata
+ (PAGE_SIZE
- resid
), 0x0, resid
);
712 /* Success. Indicate we have identified an encapsulated binary */
714 imgp
->ip_arch_offset
= (user_size_t
)fat_arch
.offset
;
715 imgp
->ip_arch_size
= (user_size_t
)fat_arch
.size
;
716 imgp
->ip_origcputype
= fat_arch
.cputype
;
717 imgp
->ip_origcpusubtype
= fat_arch
.cpusubtype
;
720 kauth_cred_unref(&cred
);
725 activate_exec_state(task_t task
, proc_t p
, thread_t thread
, load_result_t
*result
)
729 task_set_dyld_info(task
, MACH_VM_MIN_ADDRESS
, 0);
730 if (result
->is64bit
) {
731 task_set_64bit(task
, TRUE
);
732 OSBitOrAtomic(P_LP64
, &p
->p_flag
);
734 task_set_64bit(task
, FALSE
);
735 OSBitAndAtomic(~((uint32_t)P_LP64
), &p
->p_flag
);
738 ret
= thread_state_initialize(thread
);
739 if (ret
!= KERN_SUCCESS
) {
743 if (result
->threadstate
) {
744 uint32_t *ts
= result
->threadstate
;
745 uint32_t total_size
= result
->threadstate_sz
;
747 while (total_size
> 0) {
748 uint32_t flavor
= *ts
++;
749 uint32_t size
= *ts
++;
751 ret
= thread_setstatus(thread
, flavor
, (thread_state_t
)ts
, size
);
756 total_size
-= (size
+ 2) * sizeof(uint32_t);
760 thread_setentrypoint(thread
, result
->entry_point
);
767 * Set p->p_comm and p->p_name to the name passed to exec
770 set_proc_name(struct image_params
*imgp
, proc_t p
)
772 int p_name_len
= sizeof(p
->p_name
) - 1;
774 if (imgp
->ip_ndp
->ni_cnd
.cn_namelen
> p_name_len
) {
775 imgp
->ip_ndp
->ni_cnd
.cn_namelen
= p_name_len
;
778 bcopy((caddr_t
)imgp
->ip_ndp
->ni_cnd
.cn_nameptr
, (caddr_t
)p
->p_name
,
779 (unsigned)imgp
->ip_ndp
->ni_cnd
.cn_namelen
);
780 p
->p_name
[imgp
->ip_ndp
->ni_cnd
.cn_namelen
] = '\0';
782 if (imgp
->ip_ndp
->ni_cnd
.cn_namelen
> MAXCOMLEN
) {
783 imgp
->ip_ndp
->ni_cnd
.cn_namelen
= MAXCOMLEN
;
786 bcopy((caddr_t
)imgp
->ip_ndp
->ni_cnd
.cn_nameptr
, (caddr_t
)p
->p_comm
,
787 (unsigned)imgp
->ip_ndp
->ni_cnd
.cn_namelen
);
788 p
->p_comm
[imgp
->ip_ndp
->ni_cnd
.cn_namelen
] = '\0';
791 static uint64_t get_va_fsid(struct vnode_attr
*vap
)
793 if (VATTR_IS_SUPPORTED(vap
, va_fsid64
)) {
794 return *(uint64_t *)&vap
->va_fsid64
;
803 * Image activator for mach-o 1.0 binaries.
805 * Parameters; struct image_params * image parameter block
807 * Returns: -1 not a fat binary (keep looking)
808 * -2 Success: encapsulated binary: reread
809 * >0 Failure: error number
810 * EBADARCH Mach-o binary, but with an unrecognized
812 * ENOMEM No memory for child process after -
813 * can only happen after vfork()
815 * Important: This image activator is NOT byte order neutral.
817 * Note: A return value other than -1 indicates subsequent image
818 * activators should not be given the opportunity to attempt
819 * to activate the image.
821 * TODO: More gracefully handle failures after vfork
824 exec_mach_imgact(struct image_params
*imgp
)
826 struct mach_header
*mach_header
= (struct mach_header
*)imgp
->ip_vdata
;
827 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
830 task_t new_task
= NULL
; /* protected by vfexec */
832 struct uthread
*uthread
;
833 vm_map_t old_map
= VM_MAP_NULL
;
834 vm_map_t map
= VM_MAP_NULL
;
836 load_result_t load_result
;
837 struct _posix_spawnattr
*psa
= NULL
;
838 int spawn
= (imgp
->ip_flags
& IMGPF_SPAWN
);
839 int vfexec
= (imgp
->ip_flags
& IMGPF_VFORK_EXEC
);
840 int exec
= (imgp
->ip_flags
& IMGPF_EXEC
);
841 os_reason_t exec_failure_reason
= OS_REASON_NULL
;
844 * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
845 * is a reserved field on the end, so for the most part, we can
846 * treat them as if they were identical. Reverse-endian Mach-O
847 * binaries are recognized but not compatible.
849 if ((mach_header
->magic
== MH_CIGAM
) ||
850 (mach_header
->magic
== MH_CIGAM_64
)) {
855 if ((mach_header
->magic
!= MH_MAGIC
) &&
856 (mach_header
->magic
!= MH_MAGIC_64
)) {
861 if (mach_header
->filetype
!= MH_EXECUTE
) {
866 if (imgp
->ip_origcputype
!= 0) {
867 /* Fat header previously had an idea about this thin file */
868 if (imgp
->ip_origcputype
!= mach_header
->cputype
||
869 imgp
->ip_origcpusubtype
!= mach_header
->cpusubtype
) {
874 imgp
->ip_origcputype
= mach_header
->cputype
;
875 imgp
->ip_origcpusubtype
= mach_header
->cpusubtype
;
878 task
= current_task();
879 thread
= current_thread();
880 uthread
= get_bsdthread_info(thread
);
882 if ((mach_header
->cputype
& CPU_ARCH_ABI64
) == CPU_ARCH_ABI64
)
883 imgp
->ip_flags
|= IMGPF_IS_64BIT
;
885 /* If posix_spawn binprefs exist, respect those prefs. */
886 psa
= (struct _posix_spawnattr
*) imgp
->ip_px_sa
;
887 if (psa
!= NULL
&& psa
->psa_binprefs
[0] != 0) {
889 for (pr
= 0; pr
< NBINPREFS
; pr
++) {
890 cpu_type_t pref
= psa
->psa_binprefs
[pr
];
892 /* No suitable arch in the pref list */
897 if (pref
== CPU_TYPE_ANY
) {
898 /* Jump to regular grading */
902 if (pref
== imgp
->ip_origcputype
) {
903 /* We have a match! */
911 if (!grade_binary(imgp
->ip_origcputype
, imgp
->ip_origcpusubtype
& ~CPU_SUBTYPE_MASK
)) {
916 /* Copy in arguments/environment from the old process */
917 error
= exec_extract_strings(imgp
);
921 AUDIT_ARG(argv
, imgp
->ip_startargv
, imgp
->ip_argc
,
922 imgp
->ip_endargv
- imgp
->ip_startargv
);
923 AUDIT_ARG(envv
, imgp
->ip_endargv
, imgp
->ip_envc
,
924 imgp
->ip_endenvv
- imgp
->ip_endargv
);
927 * We are being called to activate an image subsequent to a vfork()
928 * operation; in this case, we know that our task, thread, and
929 * uthread are actually those of our parent, and our proc, which we
930 * obtained indirectly from the image_params vfs_context_t, is the
934 imgp
->ip_new_thread
= fork_create_child(task
, NULL
, p
, FALSE
, (imgp
->ip_flags
& IMGPF_IS_64BIT
), FALSE
);
935 /* task and thread ref returned, will be released in __mac_execve */
936 if (imgp
->ip_new_thread
== NULL
) {
943 /* reset local idea of thread, uthread, task */
944 thread
= imgp
->ip_new_thread
;
945 uthread
= get_bsdthread_info(thread
);
946 task
= new_task
= get_threadtask(thread
);
949 * Load the Mach-O file.
951 * NOTE: An error after this point indicates we have potentially
952 * destroyed or overwritten some process state while attempting an
953 * execve() following a vfork(), which is an unrecoverable condition.
954 * We send the new process an immediate SIGKILL to avoid it executing
955 * any instructions in the mutated address space. For true spawns,
956 * this is not the case, and "too late" is still not too late to
957 * return an error code to the parent process.
961 * Actually load the image file we previously decided to load.
963 lret
= load_machfile(imgp
, mach_header
, thread
, &map
, &load_result
);
964 if (lret
!= LOAD_SUCCESS
) {
965 error
= load_return_to_errno(lret
);
967 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
968 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_BAD_MACHO
, 0, 0);
969 if (lret
== LOAD_BADMACHO_UPX
) {
970 /* set anything that might be useful in the crash report */
971 set_proc_name(imgp
, p
);
973 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_UPX
);
974 exec_failure_reason
->osr_flags
|= OS_REASON_FLAG_GENERATE_CRASH_REPORT
;
975 exec_failure_reason
->osr_flags
|= OS_REASON_FLAG_CONSISTENT_FAILURE
;
977 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_BAD_MACHO
);
984 p
->p_cputype
= imgp
->ip_origcputype
;
985 p
->p_cpusubtype
= imgp
->ip_origcpusubtype
;
988 vm_map_set_user_wire_limit(map
, p
->p_rlimit
[RLIMIT_MEMLOCK
].rlim_cur
);
991 * Set code-signing flags if this binary is signed, or if parent has
992 * requested them on exec.
994 if (load_result
.csflags
& CS_VALID
) {
995 imgp
->ip_csflags
|= load_result
.csflags
&
996 (CS_VALID
|CS_SIGNED
|CS_DEV_CODE
|
997 CS_HARD
|CS_KILL
|CS_RESTRICT
|CS_ENFORCEMENT
|CS_REQUIRE_LV
|
998 CS_ENTITLEMENTS_VALIDATED
|CS_DYLD_PLATFORM
|
999 CS_ENTITLEMENT_FLAGS
|
1000 CS_EXEC_SET_HARD
|CS_EXEC_SET_KILL
|CS_EXEC_SET_ENFORCEMENT
);
1002 imgp
->ip_csflags
&= ~CS_VALID
;
1005 if (p
->p_csflags
& CS_EXEC_SET_HARD
)
1006 imgp
->ip_csflags
|= CS_HARD
;
1007 if (p
->p_csflags
& CS_EXEC_SET_KILL
)
1008 imgp
->ip_csflags
|= CS_KILL
;
1009 if (p
->p_csflags
& CS_EXEC_SET_ENFORCEMENT
)
1010 imgp
->ip_csflags
|= CS_ENFORCEMENT
;
1011 if (p
->p_csflags
& CS_EXEC_INHERIT_SIP
) {
1012 if (p
->p_csflags
& CS_INSTALLER
)
1013 imgp
->ip_csflags
|= CS_INSTALLER
;
1014 if (p
->p_csflags
& CS_DATAVAULT_CONTROLLER
)
1015 imgp
->ip_csflags
|= CS_DATAVAULT_CONTROLLER
;
1016 if (p
->p_csflags
& CS_NVRAM_UNRESTRICTED
)
1017 imgp
->ip_csflags
|= CS_NVRAM_UNRESTRICTED
;
1021 * Set up the system reserved areas in the new address space.
1023 vm_map_exec(map
, task
, load_result
.is64bit
, (void *)p
->p_fd
->fd_rdir
, cpu_type());
1026 * Close file descriptors which specify close-on-exec.
1028 fdexec(p
, psa
!= NULL
? psa
->psa_flags
: 0, exec
);
1031 * deal with set[ug]id.
1033 error
= exec_handle_sugid(imgp
);
1035 vm_map_deallocate(map
);
1037 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1038 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_SUGID_FAILURE
, 0, 0);
1039 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_SUGID_FAILURE
);
1044 * Commit to new map.
1046 * Swap the new map for the old for target task, which consumes
1047 * our new map reference but each leaves us responsible for the
1048 * old_map reference. That lets us get off the pmap associated
1049 * with it, and then we can release it.
1051 * The map needs to be set on the target task which is different
1052 * than current task, thus swap_task_map is used instead of
1055 old_map
= swap_task_map(task
, thread
, map
);
1056 vm_map_deallocate(old_map
);
1059 lret
= activate_exec_state(task
, p
, thread
, &load_result
);
1060 if (lret
!= KERN_SUCCESS
) {
1062 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1063 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_ACTV_THREADSTATE
, 0, 0);
1064 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_ACTV_THREADSTATE
);
1069 * deal with voucher on exec-calling thread.
1071 if (imgp
->ip_new_thread
== NULL
)
1072 thread_set_mach_voucher(current_thread(), IPC_VOUCHER_NULL
);
1074 /* Make sure we won't interrupt ourself signalling a partial process */
1075 if (!vfexec
&& !spawn
&& (p
->p_lflag
& P_LTRACED
))
1076 psignal(p
, SIGTRAP
);
1078 if (load_result
.unixproc
&&
1079 create_unix_stack(get_task_map(task
),
1081 p
) != KERN_SUCCESS
) {
1082 error
= load_return_to_errno(LOAD_NOSPACE
);
1084 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1085 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_STACK_ALLOC
, 0, 0);
1086 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_STACK_ALLOC
);
1090 error
= exec_add_apple_strings(imgp
, &load_result
);
1093 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1094 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_APPLE_STRING_INIT
, 0, 0);
1095 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_APPLE_STRING_INIT
);
1099 /* Switch to target task's map to copy out strings */
1100 old_map
= vm_map_switch(get_task_map(task
));
1102 if (load_result
.unixproc
) {
1106 * Copy the strings area out into the new process address
1110 error
= exec_copyout_strings(imgp
, &ap
);
1112 vm_map_switch(old_map
);
1114 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1115 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_COPYOUT_STRINGS
, 0, 0);
1116 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_COPYOUT_STRINGS
);
1120 thread_setuserstack(thread
, ap
);
1123 if (load_result
.dynlinker
) {
1125 int new_ptr_size
= (imgp
->ip_flags
& IMGPF_IS_64BIT
) ? 8 : 4;
1127 /* Adjust the stack */
1128 ap
= thread_adjuserstack(thread
, -new_ptr_size
);
1129 error
= copyoutptr(load_result
.mach_header
, ap
, new_ptr_size
);
1132 vm_map_switch(old_map
);
1134 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
1135 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_COPYOUT_DYNLINKER
, 0, 0);
1136 exec_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_COPYOUT_DYNLINKER
);
1139 task_set_dyld_info(task
, load_result
.all_image_info_addr
,
1140 load_result
.all_image_info_size
);
1143 /* Avoid immediate VM faults back into kernel */
1144 exec_prefault_data(p
, imgp
, &load_result
);
1146 vm_map_switch(old_map
);
1148 /* Stop profiling */
1152 * Reset signal state.
1154 execsigs(p
, thread
);
1157 * need to cancel async IO requests that can be cancelled and wait for those
1158 * already active. MAY BLOCK!
1163 /* FIXME: Till vmspace inherit is fixed: */
1164 if (!vfexec
&& p
->vm_shm
)
1168 /* Clean up the semaphores */
1173 * Remember file name for accounting.
1175 p
->p_acflag
&= ~AFORK
;
1177 set_proc_name(imgp
, p
);
1179 #if CONFIG_SECLUDED_MEMORY
1180 if (secluded_for_apps
&&
1181 load_result
.platform_binary
) {
1182 if (strncmp(p
->p_name
,
1184 sizeof (p
->p_name
)) == 0) {
1185 task_set_could_use_secluded_mem(task
, TRUE
);
1187 task_set_could_use_secluded_mem(task
, FALSE
);
1189 if (strncmp(p
->p_name
,
1191 sizeof (p
->p_name
)) == 0) {
1192 task_set_could_also_use_secluded_mem(task
, TRUE
);
1195 #endif /* CONFIG_SECLUDED_MEMORY */
1197 pal_dbg_set_task_name(task
);
1200 * The load result will have already been munged by AMFI to include the
1201 * platform binary flag if boot-args dictated it (AMFI will mark anything
1202 * that doesn't go through the upcall path as a platform binary if its
1203 * enforcement is disabled).
1205 if (load_result
.platform_binary
) {
1207 printf("setting platform binary on task: pid = %d\n", p
->p_pid
);
1211 * We must use 'task' here because the proc's task has not yet been
1212 * switched to the new one.
1214 task_set_platform_binary(task
, TRUE
);
1217 printf("clearing platform binary on task: pid = %d\n", p
->p_pid
);
1220 task_set_platform_binary(task
, FALSE
);
1223 #if DEVELOPMENT || DEBUG
1225 * Update the pid an proc name for importance base if any
1227 task_importance_update_owner_info(task
);
1230 memcpy(&p
->p_uuid
[0], &load_result
.uuid
[0], sizeof(p
->p_uuid
));
1233 dtrace_proc_exec(p
);
1236 if (kdebug_enable
) {
1237 long dbg_arg1
, dbg_arg2
, dbg_arg3
, dbg_arg4
;
1240 * Collect the pathname for tracing
1242 kdbg_trace_string(p
, &dbg_arg1
, &dbg_arg2
, &dbg_arg3
, &dbg_arg4
);
1244 uintptr_t fsid
= 0, fileid
= 0;
1245 if (imgp
->ip_vattr
) {
1246 uint64_t fsid64
= get_va_fsid(imgp
->ip_vattr
);
1248 fileid
= imgp
->ip_vattr
->va_fileid
;
1249 // check for (unexpected) overflow and trace zero in that case
1250 if (fsid
!= fsid64
|| fileid
!= imgp
->ip_vattr
->va_fileid
) {
1254 KERNEL_DEBUG_CONSTANT1(TRACE_DATA_EXEC
| DBG_FUNC_NONE
,
1255 p
->p_pid
, fsid
, fileid
, 0, (uintptr_t)thread_tid(thread
));
1256 KERNEL_DEBUG_CONSTANT1(TRACE_STRING_EXEC
| DBG_FUNC_NONE
,
1257 dbg_arg1
, dbg_arg2
, dbg_arg3
, dbg_arg4
, (uintptr_t)thread_tid(thread
));
1261 * If posix_spawned with the START_SUSPENDED flag, stop the
1262 * process before it runs.
1264 if (imgp
->ip_px_sa
!= NULL
) {
1265 psa
= (struct _posix_spawnattr
*) imgp
->ip_px_sa
;
1266 if (psa
->psa_flags
& POSIX_SPAWN_START_SUSPENDED
) {
1270 (void) task_suspend_internal(task
);
1275 * mark as execed, wakeup the process that vforked (if any) and tell
1276 * it that it now has its own resources back
1278 OSBitOrAtomic(P_EXEC
, &p
->p_flag
);
1279 proc_resetregister(p
);
1280 if (p
->p_pptr
&& (p
->p_lflag
& P_LPPWAIT
)) {
1282 p
->p_lflag
&= ~P_LPPWAIT
;
1284 wakeup((caddr_t
)p
->p_pptr
);
1288 * Pay for our earlier safety; deliver the delayed signals from
1289 * the incomplete vfexec process now that it's complete.
1291 if (vfexec
&& (p
->p_lflag
& P_LTRACED
)) {
1292 psignal_vfork(p
, new_task
, thread
, SIGTRAP
);
1298 /* Don't allow child process to execute any instructions */
1301 assert(exec_failure_reason
!= OS_REASON_NULL
);
1302 psignal_vfork_with_reason(p
, new_task
, thread
, SIGKILL
, exec_failure_reason
);
1303 exec_failure_reason
= OS_REASON_NULL
;
1305 assert(exec_failure_reason
!= OS_REASON_NULL
);
1306 psignal_with_reason(p
, SIGKILL
, exec_failure_reason
);
1307 exec_failure_reason
= OS_REASON_NULL
;
1310 /* Terminate the exec copy task */
1311 task_terminate_internal(task
);
1315 /* We can't stop this system call at this point, so just pretend we succeeded */
1318 os_reason_free(exec_failure_reason
);
1319 exec_failure_reason
= OS_REASON_NULL
;
1323 if (load_result
.threadstate
) {
1324 kfree(load_result
.threadstate
, load_result
.threadstate_sz
);
1325 load_result
.threadstate
= NULL
;
1329 /* If we hit this, we likely would have leaked an exit reason */
1330 assert(exec_failure_reason
== OS_REASON_NULL
);
1338 * Our image activator table; this is the table of the image types we are
1339 * capable of loading. We list them in order of preference to ensure the
1340 * fastest image load speed.
1342 * XXX hardcoded, for now; should use linker sets
1345 int (*ex_imgact
)(struct image_params
*);
1346 const char *ex_name
;
1348 { exec_mach_imgact
, "Mach-o Binary" },
1349 { exec_fat_imgact
, "Fat Binary" },
1350 { exec_shell_imgact
, "Interpreter Script" },
1356 * exec_activate_image
1358 * Description: Iterate through the available image activators, and activate
1359 * the image associated with the imgp structure. We start with
1360 * the activator for Mach-o binaries followed by that for Fat binaries
1361 * for Interpreter scripts.
1363 * Parameters: struct image_params * Image parameter block
1365 * Returns: 0 Success
1366 * EBADEXEC The executable is corrupt/unknown
1367 * execargs_alloc:EINVAL Invalid argument
1368 * execargs_alloc:EACCES Permission denied
1369 * execargs_alloc:EINTR Interrupted function
1370 * execargs_alloc:ENOMEM Not enough space
1371 * exec_save_path:EFAULT Bad address
1372 * exec_save_path:ENAMETOOLONG Filename too long
1373 * exec_check_permissions:EACCES Permission denied
1374 * exec_check_permissions:ENOEXEC Executable file format error
1375 * exec_check_permissions:ETXTBSY Text file busy [misuse of error code]
1376 * exec_check_permissions:???
1378 * vn_rdwr:??? [anything vn_rdwr can return]
1379 * <ex_imgact>:??? [anything an imgact can return]
1380 * EDEADLK Process is being terminated
1383 exec_activate_image(struct image_params
*imgp
)
1385 struct nameidata
*ndp
= NULL
;
1386 const char *excpath
;
1389 int once
= 1; /* save SGUID-ness for interpreted files */
1392 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
1394 error
= execargs_alloc(imgp
);
1398 error
= exec_save_path(imgp
, imgp
->ip_user_fname
, imgp
->ip_seg
, &excpath
);
1403 /* Use excpath, which contains the copyin-ed exec path */
1404 DTRACE_PROC1(exec
, uintptr_t, excpath
);
1406 MALLOC(ndp
, struct nameidata
*, sizeof(*ndp
), M_TEMP
, M_WAITOK
| M_ZERO
);
1412 NDINIT(ndp
, LOOKUP
, OP_LOOKUP
, FOLLOW
| LOCKLEAF
| AUDITVNPATH1
,
1413 UIO_SYSSPACE
, CAST_USER_ADDR_T(excpath
), imgp
->ip_vfs_context
);
1419 imgp
->ip_ndp
= ndp
; /* successful namei(); call nameidone() later */
1420 imgp
->ip_vp
= ndp
->ni_vp
; /* if set, need to vnode_put() at some point */
1423 * Before we start the transition from binary A to binary B, make
1424 * sure another thread hasn't started exiting the process. We grab
1425 * the proc lock to check p_lflag initially, and the transition
1426 * mechanism ensures that the value doesn't change after we release
1430 if (p
->p_lflag
& P_LEXIT
) {
1435 error
= proc_transstart(p
, 1, 0);
1440 error
= exec_check_permissions(imgp
);
1444 /* Copy; avoid invocation of an interpreter overwriting the original */
1447 *imgp
->ip_origvattr
= *imgp
->ip_vattr
;
1450 error
= vn_rdwr(UIO_READ
, imgp
->ip_vp
, imgp
->ip_vdata
, PAGE_SIZE
, 0,
1451 UIO_SYSSPACE
, IO_NODELOCKED
,
1452 vfs_context_ucred(imgp
->ip_vfs_context
),
1453 &resid
, vfs_context_proc(imgp
->ip_vfs_context
));
1458 memset(imgp
->ip_vdata
+ (PAGE_SIZE
- resid
), 0x0, resid
);
1461 encapsulated_binary
:
1462 /* Limit the number of iterations we will attempt on each binary */
1463 if (++itercount
> EAI_ITERLIMIT
) {
1468 for(i
= 0; error
== -1 && execsw
[i
].ex_imgact
!= NULL
; i
++) {
1470 error
= (*execsw
[i
].ex_imgact
)(imgp
);
1473 /* case -1: not claimed: continue */
1474 case -2: /* Encapsulated binary, imgp->ip_XXX set for next iteration */
1475 goto encapsulated_binary
;
1477 case -3: /* Interpreter */
1480 * Copy the script label for later use. Note that
1481 * the label can be different when the script is
1482 * actually read by the interpreter.
1484 if (imgp
->ip_scriptlabelp
)
1485 mac_vnode_label_free(imgp
->ip_scriptlabelp
);
1486 imgp
->ip_scriptlabelp
= mac_vnode_label_alloc();
1487 if (imgp
->ip_scriptlabelp
== NULL
) {
1491 mac_vnode_label_copy(imgp
->ip_vp
->v_label
,
1492 imgp
->ip_scriptlabelp
);
1495 * Take a ref of the script vnode for later use.
1497 if (imgp
->ip_scriptvp
)
1498 vnode_put(imgp
->ip_scriptvp
);
1499 if (vnode_getwithref(imgp
->ip_vp
) == 0)
1500 imgp
->ip_scriptvp
= imgp
->ip_vp
;
1505 vnode_put(imgp
->ip_vp
);
1506 imgp
->ip_vp
= NULL
; /* already put */
1507 imgp
->ip_ndp
= NULL
; /* already nameidone */
1509 /* Use excpath, which exec_shell_imgact reset to the interpreter */
1510 NDINIT(ndp
, LOOKUP
, OP_LOOKUP
, FOLLOW
| LOCKLEAF
,
1511 UIO_SYSSPACE
, CAST_USER_ADDR_T(excpath
), imgp
->ip_vfs_context
);
1513 proc_transend(p
, 0);
1522 * Call out to allow 3rd party notification of exec.
1523 * Ignore result of kauth_authorize_fileop call.
1525 if (error
== 0 && kauth_authorize_fileop_has_listeners()) {
1526 kauth_authorize_fileop(vfs_context_ucred(imgp
->ip_vfs_context
),
1528 (uintptr_t)ndp
->ni_vp
, 0);
1531 proc_transend(p
, 0);
1534 if (imgp
->ip_strings
)
1535 execargs_free(imgp
);
1537 nameidone(imgp
->ip_ndp
);
1546 * exec_handle_spawnattr_policy
1548 * Description: Decode and apply the posix_spawn apptype, qos clamp, and watchport ports to the task.
1550 * Parameters: proc_t p process to apply attributes to
1551 * int psa_apptype posix spawn attribute apptype
1553 * Returns: 0 Success
1556 exec_handle_spawnattr_policy(proc_t p
, int psa_apptype
, uint64_t psa_qos_clamp
, uint64_t psa_darwin_role
,
1557 ipc_port_t
* portwatch_ports
, int portwatch_count
)
1559 int apptype
= TASK_APPTYPE_NONE
;
1560 int qos_clamp
= THREAD_QOS_UNSPECIFIED
;
1561 int role
= TASK_UNSPECIFIED
;
1563 if ((psa_apptype
& POSIX_SPAWN_PROC_TYPE_MASK
) != 0) {
1564 int proctype
= psa_apptype
& POSIX_SPAWN_PROC_TYPE_MASK
;
1567 case POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE
:
1568 apptype
= TASK_APPTYPE_DAEMON_INTERACTIVE
;
1570 case POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD
:
1571 apptype
= TASK_APPTYPE_DAEMON_STANDARD
;
1573 case POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE
:
1574 apptype
= TASK_APPTYPE_DAEMON_ADAPTIVE
;
1576 case POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND
:
1577 apptype
= TASK_APPTYPE_DAEMON_BACKGROUND
;
1579 case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT
:
1580 apptype
= TASK_APPTYPE_APP_DEFAULT
;
1582 #if !CONFIG_EMBEDDED
1583 case POSIX_SPAWN_PROC_TYPE_APP_TAL
:
1584 apptype
= TASK_APPTYPE_APP_TAL
;
1586 #endif /* !CONFIG_EMBEDDED */
1588 apptype
= TASK_APPTYPE_NONE
;
1589 /* TODO: Should an invalid value here fail the spawn? */
1594 if (psa_qos_clamp
!= POSIX_SPAWN_PROC_CLAMP_NONE
) {
1595 switch (psa_qos_clamp
) {
1596 case POSIX_SPAWN_PROC_CLAMP_UTILITY
:
1597 qos_clamp
= THREAD_QOS_UTILITY
;
1599 case POSIX_SPAWN_PROC_CLAMP_BACKGROUND
:
1600 qos_clamp
= THREAD_QOS_BACKGROUND
;
1602 case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE
:
1603 qos_clamp
= THREAD_QOS_MAINTENANCE
;
1606 qos_clamp
= THREAD_QOS_UNSPECIFIED
;
1607 /* TODO: Should an invalid value here fail the spawn? */
1612 if (psa_darwin_role
!= PRIO_DARWIN_ROLE_DEFAULT
) {
1613 proc_darwin_role_to_task_role(psa_darwin_role
, &role
);
1616 if (apptype
!= TASK_APPTYPE_NONE
||
1617 qos_clamp
!= THREAD_QOS_UNSPECIFIED
||
1618 role
!= TASK_UNSPECIFIED
) {
1619 proc_set_task_spawnpolicy(p
->task
, apptype
, qos_clamp
, role
,
1620 portwatch_ports
, portwatch_count
);
1628 * exec_handle_port_actions
1630 * Description: Go through the _posix_port_actions_t contents,
1631 * calling task_set_special_port, task_set_exception_ports
1632 * and/or audit_session_spawnjoin for the current task.
1634 * Parameters: struct image_params * Image parameter block
1636 * Returns: 0 Success
1638 * ENOTSUP Illegal posix_spawn attr flag was set
1641 exec_handle_port_actions(struct image_params
*imgp
, boolean_t
* portwatch_present
,
1642 ipc_port_t
* portwatch_ports
)
1644 _posix_spawn_port_actions_t pacts
= imgp
->ip_px_spa
;
1646 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
1648 _ps_port_action_t
*act
= NULL
;
1649 task_t task
= get_threadtask(imgp
->ip_new_thread
);
1650 ipc_port_t port
= NULL
;
1655 *portwatch_present
= FALSE
;
1657 for (i
= 0; i
< pacts
->pspa_count
; i
++) {
1658 act
= &pacts
->pspa_actions
[i
];
1660 if (MACH_PORT_VALID(act
->new_port
)) {
1661 kr
= ipc_object_copyin(get_task_ipcspace(current_task()),
1662 act
->new_port
, MACH_MSG_TYPE_COPY_SEND
,
1663 (ipc_object_t
*) &port
);
1665 if (kr
!= KERN_SUCCESS
) {
1670 /* it's NULL or DEAD */
1671 port
= CAST_MACH_NAME_TO_PORT(act
->new_port
);
1674 switch (act
->port_type
) {
1676 kr
= task_set_special_port(task
, act
->which
, port
);
1678 if (kr
!= KERN_SUCCESS
)
1682 case PSPA_EXCEPTION
:
1683 kr
= task_set_exception_ports(task
, act
->mask
, port
,
1684 act
->behavior
, act
->flavor
);
1685 if (kr
!= KERN_SUCCESS
)
1689 case PSPA_AU_SESSION
:
1690 ret
= audit_session_spawnjoin(p
, task
, port
);
1692 /* audit_session_spawnjoin() has already dropped the reference in case of error. */
1698 case PSPA_IMP_WATCHPORTS
:
1699 if (portwatch_ports
!= NULL
&& IPC_PORT_VALID(port
)) {
1700 *portwatch_present
= TRUE
;
1701 /* hold on to this till end of spawn */
1702 portwatch_ports
[i
] = port
;
1704 ipc_port_release_send(port
);
1714 /* action failed, so release port resources */
1715 ipc_port_release_send(port
);
1722 DTRACE_PROC1(spawn__port__failure
, mach_port_name_t
, act
->new_port
);
1727 * exec_handle_file_actions
1729 * Description: Go through the _posix_file_actions_t contents applying the
1730 * open, close, and dup2 operations to the open file table for
1731 * the current process.
1733 * Parameters: struct image_params * Image parameter block
1735 * Returns: 0 Success
1738 * Note: Actions are applied in the order specified, with the credential
1739 * of the parent process. This is done to permit the parent
1740 * process to utilize POSIX_SPAWN_RESETIDS to drop privilege in
1741 * the child following operations the child may in fact not be
1742 * normally permitted to perform.
1745 exec_handle_file_actions(struct image_params
*imgp
, short psa_flags
)
1749 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
1750 _posix_spawn_file_actions_t px_sfap
= imgp
->ip_px_sfa
;
1751 int ival
[2]; /* dummy retval for system calls) */
1753 for (action
= 0; action
< px_sfap
->psfa_act_count
; action
++) {
1754 _psfa_action_t
*psfa
= &px_sfap
->psfa_act_acts
[ action
];
1756 switch(psfa
->psfaa_type
) {
1759 * Open is different, in that it requires the use of
1760 * a path argument, which is normally copied in from
1761 * user space; because of this, we have to support an
1762 * open from kernel space that passes an address space
1763 * context of UIO_SYSSPACE, and casts the address
1764 * argument to a user_addr_t.
1767 struct vnode_attr
*vap
;
1768 struct nameidata
*ndp
;
1769 int mode
= psfa
->psfaa_openargs
.psfao_mode
;
1770 struct dup2_args dup2a
;
1771 struct close_nocancel_args ca
;
1774 MALLOC(bufp
, char *, sizeof(*vap
) + sizeof(*ndp
), M_TEMP
, M_WAITOK
| M_ZERO
);
1780 vap
= (struct vnode_attr
*) bufp
;
1781 ndp
= (struct nameidata
*) (bufp
+ sizeof(*vap
));
1784 /* Mask off all but regular access permissions */
1785 mode
= ((mode
&~ p
->p_fd
->fd_cmask
) & ALLPERMS
) & ~S_ISTXT
;
1786 VATTR_SET(vap
, va_mode
, mode
& ACCESSPERMS
);
1788 NDINIT(ndp
, LOOKUP
, OP_OPEN
, FOLLOW
| AUDITVNPATH1
, UIO_SYSSPACE
,
1789 CAST_USER_ADDR_T(psfa
->psfaa_openargs
.psfao_path
),
1790 imgp
->ip_vfs_context
);
1792 error
= open1(imgp
->ip_vfs_context
,
1794 psfa
->psfaa_openargs
.psfao_oflag
,
1796 fileproc_alloc_init
, NULL
,
1802 * If there's an error, or we get the right fd by
1803 * accident, then drop out here. This is easier than
1804 * reworking all the open code to preallocate fd
1805 * slots, and internally taking one as an argument.
1807 if (error
|| ival
[0] == psfa
->psfaa_filedes
)
1812 * If we didn't fall out from an error, we ended up
1813 * with the wrong fd; so now we've got to try to dup2
1814 * it to the right one.
1816 dup2a
.from
= origfd
;
1817 dup2a
.to
= psfa
->psfaa_filedes
;
1820 * The dup2() system call implementation sets
1821 * ival to newfd in the success case, but we
1822 * can ignore that, since if we didn't get the
1823 * fd we wanted, the error will stop us.
1825 error
= dup2(p
, &dup2a
, ival
);
1830 * Finally, close the original fd.
1834 error
= close_nocancel(p
, &ca
, ival
);
1839 struct dup2_args dup2a
;
1841 dup2a
.from
= psfa
->psfaa_filedes
;
1842 dup2a
.to
= psfa
->psfaa_openargs
.psfao_oflag
;
1845 * The dup2() system call implementation sets
1846 * ival to newfd in the success case, but we
1847 * can ignore that, since if we didn't get the
1848 * fd we wanted, the error will stop us.
1850 error
= dup2(p
, &dup2a
, ival
);
1855 struct close_nocancel_args ca
;
1857 ca
.fd
= psfa
->psfaa_filedes
;
1859 error
= close_nocancel(p
, &ca
, ival
);
1863 case PSFA_INHERIT
: {
1864 struct fcntl_nocancel_args fcntla
;
1867 * Check to see if the descriptor exists, and
1868 * ensure it's -not- marked as close-on-exec.
1870 * Attempting to "inherit" a guarded fd will
1871 * result in a error.
1873 fcntla
.fd
= psfa
->psfaa_filedes
;
1874 fcntla
.cmd
= F_GETFD
;
1875 if ((error
= fcntl_nocancel(p
, &fcntla
, ival
)) != 0)
1878 if ((ival
[0] & FD_CLOEXEC
) == FD_CLOEXEC
) {
1879 fcntla
.fd
= psfa
->psfaa_filedes
;
1880 fcntla
.cmd
= F_SETFD
;
1881 fcntla
.arg
= ival
[0] & ~FD_CLOEXEC
;
1882 error
= fcntl_nocancel(p
, &fcntla
, ival
);
1893 /* All file actions failures are considered fatal, per POSIX */
1896 if (PSFA_OPEN
== psfa
->psfaa_type
) {
1897 DTRACE_PROC1(spawn__open__failure
, uintptr_t,
1898 psfa
->psfaa_openargs
.psfao_path
);
1900 DTRACE_PROC1(spawn__fd__failure
, int, psfa
->psfaa_filedes
);
1906 if (error
!= 0 || (psa_flags
& POSIX_SPAWN_CLOEXEC_DEFAULT
) == 0)
1910 * If POSIX_SPAWN_CLOEXEC_DEFAULT is set, behave (during
1911 * this spawn only) as if "close on exec" is the default
1912 * disposition of all pre-existing file descriptors. In this case,
1913 * the list of file descriptors mentioned in the file actions
1914 * are the only ones that can be inherited, so mark them now.
1916 * The actual closing part comes later, in fdexec().
1919 for (action
= 0; action
< px_sfap
->psfa_act_count
; action
++) {
1920 _psfa_action_t
*psfa
= &px_sfap
->psfa_act_acts
[action
];
1921 int fd
= psfa
->psfaa_filedes
;
1923 switch (psfa
->psfaa_type
) {
1925 fd
= psfa
->psfaa_openargs
.psfao_oflag
;
1929 *fdflags(p
, fd
) |= UF_INHERIT
;
1943 * exec_spawnattr_getmacpolicyinfo
1946 exec_spawnattr_getmacpolicyinfo(const void *macextensions
, const char *policyname
, size_t *lenp
)
1948 const struct _posix_spawn_mac_policy_extensions
*psmx
= macextensions
;
1954 for (i
= 0; i
< psmx
->psmx_count
; i
++) {
1955 const _ps_mac_policy_extension_t
*extension
= &psmx
->psmx_extensions
[i
];
1956 if (strncmp(extension
->policyname
, policyname
, sizeof(extension
->policyname
)) == 0) {
1958 *lenp
= extension
->datalen
;
1959 return extension
->datap
;
1969 spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc
*px_args
, _posix_spawn_mac_policy_extensions_t
*psmxp
)
1971 _posix_spawn_mac_policy_extensions_t psmx
= NULL
;
1978 if (px_args
->mac_extensions_size
< PS_MAC_EXTENSIONS_SIZE(1) ||
1979 px_args
->mac_extensions_size
> PAGE_SIZE
) {
1984 MALLOC(psmx
, _posix_spawn_mac_policy_extensions_t
, px_args
->mac_extensions_size
, M_TEMP
, M_WAITOK
);
1985 if ((error
= copyin(px_args
->mac_extensions
, psmx
, px_args
->mac_extensions_size
)) != 0)
1988 size_t extsize
= PS_MAC_EXTENSIONS_SIZE(psmx
->psmx_count
);
1989 if (extsize
== 0 || extsize
> px_args
->mac_extensions_size
) {
1994 for (i
= 0; i
< psmx
->psmx_count
; i
++) {
1995 _ps_mac_policy_extension_t
*extension
= &psmx
->psmx_extensions
[i
];
1996 if (extension
->datalen
== 0 || extension
->datalen
> PAGE_SIZE
) {
2002 for (copycnt
= 0; copycnt
< psmx
->psmx_count
; copycnt
++) {
2003 _ps_mac_policy_extension_t
*extension
= &psmx
->psmx_extensions
[copycnt
];
2006 MALLOC(data
, void *, extension
->datalen
, M_TEMP
, M_WAITOK
);
2007 if ((error
= copyin(extension
->data
, data
, extension
->datalen
)) != 0) {
2011 extension
->datap
= data
;
2019 for (i
= 0; i
< copycnt
; i
++)
2020 FREE(psmx
->psmx_extensions
[i
].datap
, M_TEMP
);
2027 spawn_free_macpolicyinfo(_posix_spawn_mac_policy_extensions_t psmx
)
2033 for (i
= 0; i
< psmx
->psmx_count
; i
++)
2034 FREE(psmx
->psmx_extensions
[i
].datap
, M_TEMP
);
2037 #endif /* CONFIG_MACF */
2039 #if CONFIG_COALITIONS
2040 static inline void spawn_coalitions_release_all(coalition_t coal
[COALITION_NUM_TYPES
])
2042 for (int c
= 0; c
< COALITION_NUM_TYPES
; c
++) {
2044 coalition_remove_active(coal
[c
]);
2045 coalition_release(coal
[c
]);
2052 static int spawn_validate_persona(struct _posix_spawn_persona_info
*px_persona
)
2055 struct persona
*persona
= NULL
;
2056 int verify
= px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_FLAGS_VERIFY
;
2059 * TODO: rdar://problem/19981151
2060 * Add entitlement check!
2062 if (!kauth_cred_issuser(kauth_cred_get()))
2065 persona
= persona_lookup(px_persona
->pspi_id
);
2072 if (px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_UID
) {
2073 if (px_persona
->pspi_uid
!= persona_get_uid(persona
)) {
2078 if (px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_GID
) {
2079 if (px_persona
->pspi_gid
!= persona_get_gid(persona
)) {
2084 if (px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_GROUPS
) {
2086 gid_t groups
[NGROUPS_MAX
];
2088 if (persona_get_groups(persona
, &ngroups
, groups
,
2089 px_persona
->pspi_ngroups
) != 0) {
2093 if (ngroups
!= (int)px_persona
->pspi_ngroups
) {
2098 if (px_persona
->pspi_groups
[ngroups
] != groups
[ngroups
]) {
2103 if (px_persona
->pspi_gmuid
!= persona_get_gmuid(persona
)) {
2112 persona_put(persona
);
2117 static int spawn_persona_adopt(proc_t p
, struct _posix_spawn_persona_info
*px_persona
)
2121 struct persona
*persona
= NULL
;
2122 int override
= !!(px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE
);
2125 return persona_proc_adopt_id(p
, px_persona
->pspi_id
, NULL
);
2128 * we want to spawn into the given persona, but we want to override
2129 * the kauth with a different UID/GID combo
2131 persona
= persona_lookup(px_persona
->pspi_id
);
2135 cred
= persona_get_cred(persona
);
2141 if (px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_UID
) {
2142 cred
= kauth_cred_setresuid(cred
,
2143 px_persona
->pspi_uid
,
2144 px_persona
->pspi_uid
,
2145 px_persona
->pspi_uid
,
2149 if (px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_GID
) {
2150 cred
= kauth_cred_setresgid(cred
,
2151 px_persona
->pspi_gid
,
2152 px_persona
->pspi_gid
,
2153 px_persona
->pspi_gid
);
2156 if (px_persona
->pspi_flags
& POSIX_SPAWN_PERSONA_GROUPS
) {
2157 cred
= kauth_cred_setgroups(cred
,
2158 px_persona
->pspi_groups
,
2159 px_persona
->pspi_ngroups
,
2160 px_persona
->pspi_gmuid
);
2163 ret
= persona_proc_adopt(p
, persona
, cred
);
2166 persona_put(persona
);
2174 * Parameters: uap->pid Pointer to pid return area
2175 * uap->fname File name to exec
2176 * uap->argp Argument list
2177 * uap->envp Environment list
2179 * Returns: 0 Success
2180 * EINVAL Invalid argument
2181 * ENOTSUP Not supported
2182 * ENOEXEC Executable file format error
2183 * exec_activate_image:EINVAL Invalid argument
2184 * exec_activate_image:EACCES Permission denied
2185 * exec_activate_image:EINTR Interrupted function
2186 * exec_activate_image:ENOMEM Not enough space
2187 * exec_activate_image:EFAULT Bad address
2188 * exec_activate_image:ENAMETOOLONG Filename too long
2189 * exec_activate_image:ENOEXEC Executable file format error
2190 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
2191 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
2192 * exec_activate_image:???
2193 * mac_execve_enter:???
2195 * TODO: Expect to need __mac_posix_spawn() at some point...
2196 * Handle posix_spawnattr_t
2197 * Handle posix_spawn_file_actions_t
2200 posix_spawn(proc_t ap
, struct posix_spawn_args
*uap
, int32_t *retval
)
2202 proc_t p
= ap
; /* quiet bogus GCC vfork() warning */
2203 user_addr_t pid
= uap
->pid
;
2204 int ival
[2]; /* dummy retval for setpgid() */
2206 struct image_params
*imgp
;
2207 struct vnode_attr
*vap
;
2208 struct vnode_attr
*origvap
;
2209 struct uthread
*uthread
= 0; /* compiler complains if not set to 0*/
2211 int is_64
= IS_64BIT_PROCESS(p
);
2212 struct vfs_context context
;
2213 struct user__posix_spawn_args_desc px_args
;
2214 struct _posix_spawnattr px_sa
;
2215 _posix_spawn_file_actions_t px_sfap
= NULL
;
2216 _posix_spawn_port_actions_t px_spap
= NULL
;
2217 struct __kern_sigaction vec
;
2218 boolean_t spawn_no_exec
= FALSE
;
2219 boolean_t proc_transit_set
= TRUE
;
2220 boolean_t exec_done
= FALSE
;
2221 int portwatch_count
= 0;
2222 ipc_port_t
* portwatch_ports
= NULL
;
2223 vm_size_t px_sa_offset
= offsetof(struct _posix_spawnattr
, psa_ports
);
2224 task_t new_task
= NULL
;
2225 boolean_t should_release_proc_ref
= FALSE
;
2226 void *inherit
= NULL
;
2228 struct _posix_spawn_persona_info
*px_persona
= NULL
;
2232 * Allocate a big chunk for locals instead of using stack since these
2233 * structures are pretty big.
2235 MALLOC(bufp
, char *, (sizeof(*imgp
) + sizeof(*vap
) + sizeof(*origvap
)), M_TEMP
, M_WAITOK
| M_ZERO
);
2236 imgp
= (struct image_params
*) bufp
;
2241 vap
= (struct vnode_attr
*) (bufp
+ sizeof(*imgp
));
2242 origvap
= (struct vnode_attr
*) (bufp
+ sizeof(*imgp
) + sizeof(*vap
));
2244 /* Initialize the common data in the image_params structure */
2245 imgp
->ip_user_fname
= uap
->path
;
2246 imgp
->ip_user_argv
= uap
->argv
;
2247 imgp
->ip_user_envv
= uap
->envp
;
2248 imgp
->ip_vattr
= vap
;
2249 imgp
->ip_origvattr
= origvap
;
2250 imgp
->ip_vfs_context
= &context
;
2251 imgp
->ip_flags
= (is_64
? IMGPF_WAS_64BIT
: IMGPF_NONE
);
2252 imgp
->ip_seg
= (is_64
? UIO_USERSPACE64
: UIO_USERSPACE32
);
2253 imgp
->ip_mac_return
= 0;
2254 imgp
->ip_px_persona
= NULL
;
2255 imgp
->ip_cs_error
= OS_REASON_NULL
;
2257 if (uap
->adesc
!= USER_ADDR_NULL
) {
2259 error
= copyin(uap
->adesc
, &px_args
, sizeof(px_args
));
2261 struct user32__posix_spawn_args_desc px_args32
;
2263 error
= copyin(uap
->adesc
, &px_args32
, sizeof(px_args32
));
2266 * Convert arguments descriptor from external 32 bit
2267 * representation to internal 64 bit representation
2269 px_args
.attr_size
= px_args32
.attr_size
;
2270 px_args
.attrp
= CAST_USER_ADDR_T(px_args32
.attrp
);
2271 px_args
.file_actions_size
= px_args32
.file_actions_size
;
2272 px_args
.file_actions
= CAST_USER_ADDR_T(px_args32
.file_actions
);
2273 px_args
.port_actions_size
= px_args32
.port_actions_size
;
2274 px_args
.port_actions
= CAST_USER_ADDR_T(px_args32
.port_actions
);
2275 px_args
.mac_extensions_size
= px_args32
.mac_extensions_size
;
2276 px_args
.mac_extensions
= CAST_USER_ADDR_T(px_args32
.mac_extensions
);
2277 px_args
.coal_info_size
= px_args32
.coal_info_size
;
2278 px_args
.coal_info
= CAST_USER_ADDR_T(px_args32
.coal_info
);
2279 px_args
.persona_info_size
= px_args32
.persona_info_size
;
2280 px_args
.persona_info
= CAST_USER_ADDR_T(px_args32
.persona_info
);
2285 if (px_args
.attr_size
!= 0) {
2287 * We are not copying the port_actions pointer,
2288 * because we already have it from px_args.
2289 * This is a bit fragile: <rdar://problem/16427422>
2292 if ((error
= copyin(px_args
.attrp
, &px_sa
, px_sa_offset
) != 0))
2295 bzero( (void *)( (unsigned long) &px_sa
+ px_sa_offset
), sizeof(px_sa
) - px_sa_offset
);
2297 imgp
->ip_px_sa
= &px_sa
;
2299 if (px_args
.file_actions_size
!= 0) {
2300 /* Limit file_actions to allowed number of open files */
2301 int maxfa
= (p
->p_limit
? p
->p_rlimit
[RLIMIT_NOFILE
].rlim_cur
: NOFILE
);
2302 size_t maxfa_size
= PSF_ACTIONS_SIZE(maxfa
);
2303 if (px_args
.file_actions_size
< PSF_ACTIONS_SIZE(1) ||
2304 maxfa_size
== 0 || px_args
.file_actions_size
> maxfa_size
) {
2308 MALLOC(px_sfap
, _posix_spawn_file_actions_t
, px_args
.file_actions_size
, M_TEMP
, M_WAITOK
);
2309 if (px_sfap
== NULL
) {
2313 imgp
->ip_px_sfa
= px_sfap
;
2315 if ((error
= copyin(px_args
.file_actions
, px_sfap
,
2316 px_args
.file_actions_size
)) != 0)
2319 /* Verify that the action count matches the struct size */
2320 size_t psfsize
= PSF_ACTIONS_SIZE(px_sfap
->psfa_act_count
);
2321 if (psfsize
== 0 || psfsize
!= px_args
.file_actions_size
) {
2326 if (px_args
.port_actions_size
!= 0) {
2327 /* Limit port_actions to one page of data */
2328 if (px_args
.port_actions_size
< PS_PORT_ACTIONS_SIZE(1) ||
2329 px_args
.port_actions_size
> PAGE_SIZE
) {
2334 MALLOC(px_spap
, _posix_spawn_port_actions_t
,
2335 px_args
.port_actions_size
, M_TEMP
, M_WAITOK
);
2336 if (px_spap
== NULL
) {
2340 imgp
->ip_px_spa
= px_spap
;
2342 if ((error
= copyin(px_args
.port_actions
, px_spap
,
2343 px_args
.port_actions_size
)) != 0)
2346 /* Verify that the action count matches the struct size */
2347 size_t pasize
= PS_PORT_ACTIONS_SIZE(px_spap
->pspa_count
);
2348 if (pasize
== 0 || pasize
!= px_args
.port_actions_size
) {
2354 /* copy in the persona info */
2355 if (px_args
.persona_info_size
!= 0 && px_args
.persona_info
!= 0) {
2356 /* for now, we need the exact same struct in user space */
2357 if (px_args
.persona_info_size
!= sizeof(*px_persona
)) {
2362 MALLOC(px_persona
, struct _posix_spawn_persona_info
*, px_args
.persona_info_size
, M_TEMP
, M_WAITOK
|M_ZERO
);
2363 if (px_persona
== NULL
) {
2367 imgp
->ip_px_persona
= px_persona
;
2369 if ((error
= copyin(px_args
.persona_info
, px_persona
,
2370 px_args
.persona_info_size
)) != 0)
2372 if ((error
= spawn_validate_persona(px_persona
)) != 0)
2377 if (px_args
.mac_extensions_size
!= 0) {
2378 if ((error
= spawn_copyin_macpolicyinfo(&px_args
, (_posix_spawn_mac_policy_extensions_t
*)&imgp
->ip_px_smpx
)) != 0)
2381 #endif /* CONFIG_MACF */
2384 /* set uthread to parent */
2385 uthread
= get_bsdthread_info(current_thread());
2388 * <rdar://6640530>; this does not result in a behaviour change
2389 * relative to Leopard, so there should not be any existing code
2390 * which depends on it.
2392 if (uthread
->uu_flag
& UT_VFORK
) {
2398 * If we don't have the extension flag that turns "posix_spawn()"
2399 * into "execve() with options", then we will be creating a new
2400 * process which does not inherit memory from the parent process,
2401 * which is one of the most expensive things about using fork()
2404 if (imgp
->ip_px_sa
== NULL
|| !(px_sa
.psa_flags
& POSIX_SPAWN_SETEXEC
)){
2406 /* Set the new task's coalition, if it is requested. */
2407 coalition_t coal
[COALITION_NUM_TYPES
] = { COALITION_NULL
};
2408 #if CONFIG_COALITIONS
2410 kern_return_t kr
= KERN_SUCCESS
;
2411 struct _posix_spawn_coalition_info coal_info
;
2412 int coal_role
[COALITION_NUM_TYPES
];
2414 if (imgp
->ip_px_sa
== NULL
|| !px_args
.coal_info
)
2417 memset(&coal_info
, 0, sizeof(coal_info
));
2419 if (px_args
.coal_info_size
> sizeof(coal_info
))
2420 px_args
.coal_info_size
= sizeof(coal_info
);
2421 error
= copyin(px_args
.coal_info
,
2422 &coal_info
, px_args
.coal_info_size
);
2427 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
2428 uint64_t cid
= coal_info
.psci_info
[i
].psci_id
;
2431 * don't allow tasks which are not in a
2432 * privileged coalition to spawn processes
2433 * into coalitions other than their own
2435 if (!task_is_in_privileged_coalition(p
->task
, i
)) {
2436 coal_dbg("ERROR: %d not in privilegd "
2437 "coalition of type %d",
2439 spawn_coalitions_release_all(coal
);
2444 coal_dbg("searching for coalition id:%llu", cid
);
2446 * take a reference and activation on the
2447 * coalition to guard against free-while-spawn
2450 coal
[i
] = coalition_find_and_activate_by_id(cid
);
2451 if (coal
[i
] == COALITION_NULL
) {
2452 coal_dbg("could not find coalition id:%llu "
2453 "(perhaps it has been terminated or reaped)", cid
);
2455 * release any other coalition's we
2456 * may have a reference to
2458 spawn_coalitions_release_all(coal
);
2462 if (coalition_type(coal
[i
]) != i
) {
2463 coal_dbg("coalition with id:%lld is not of type:%d"
2464 " (it's type:%d)", cid
, i
, coalition_type(coal
[i
]));
2468 coal_role
[i
] = coal_info
.psci_info
[i
].psci_role
;
2472 if (ncoals
< COALITION_NUM_TYPES
) {
2474 * If the user is attempting to spawn into a subset of
2475 * the known coalition types, then make sure they have
2476 * _at_least_ specified a resource coalition. If not,
2477 * the following fork1() call will implicitly force an
2478 * inheritance from 'p' and won't actually spawn the
2479 * new task into the coalitions the user specified.
2480 * (also the call to coalitions_set_roles will panic)
2482 if (coal
[COALITION_TYPE_RESOURCE
] == COALITION_NULL
) {
2483 spawn_coalitions_release_all(coal
);
2489 #endif /* CONFIG_COALITIONS */
2492 * note that this will implicitly inherit the
2493 * caller's persona (if it exists)
2495 error
= fork1(p
, &imgp
->ip_new_thread
, PROC_CREATE_SPAWN
, coal
);
2496 /* returns a thread and task reference */
2499 new_task
= get_threadtask(imgp
->ip_new_thread
);
2501 #if CONFIG_COALITIONS
2502 /* set the roles of this task within each given coalition */
2504 kr
= coalitions_set_roles(coal
, new_task
, coal_role
);
2505 if (kr
!= KERN_SUCCESS
)
2507 if (kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_COALITION
,
2508 MACH_COALITION_ADOPT
))) {
2509 for (i
= 0; i
< COALITION_NUM_TYPES
; i
++) {
2510 if (coal
[i
] != COALITION_NULL
) {
2512 * On 32-bit targets, uniqueid
2513 * will get truncated to 32 bits
2515 KDBG_RELEASE(MACHDBG_CODE(
2517 MACH_COALITION_ADOPT
),
2518 coalition_id(coal
[i
]),
2519 get_task_uniqueid(new_task
));
2525 /* drop our references and activations - fork1() now holds them */
2526 spawn_coalitions_release_all(coal
);
2527 #endif /* CONFIG_COALITIONS */
2531 imgp
->ip_flags
|= IMGPF_SPAWN
; /* spawn w/o exec */
2532 spawn_no_exec
= TRUE
; /* used in later tests */
2536 * If the parent isn't in a persona (launchd), and
2537 * hasn't specified a new persona for the process,
2538 * then we'll put the process into the system persona
2540 * TODO: this will have to be re-worked because as of
2541 * now, without any launchd adoption, the resulting
2542 * xpcproxy process will not have sufficient
2543 * privileges to setuid/gid.
2546 if (!proc_has_persona(p
) && imgp
->ip_px_persona
== NULL
) {
2547 MALLOC(px_persona
, struct _posix_spawn_persona_info
*,
2548 sizeof(*px_persona
), M_TEMP
, M_WAITOK
|M_ZERO
);
2549 if (px_persona
== NULL
) {
2553 px_persona
->pspi_id
= persona_get_id(g_system_persona
);
2554 imgp
->ip_px_persona
= px_persona
;
2557 #endif /* CONFIG_PERSONAS */
2560 * For execve case, create a new task and thread
2561 * which points to current_proc. The current_proc will point
2562 * to the new task after image activation and proc ref drain.
2564 * proc (current_proc) <----- old_task (current_task)
2567 * | ----------------------------------
2569 * --------- new_task (task marked as TF_EXEC_COPY)
2571 * After image activation, the proc will point to the new task
2572 * and would look like following.
2574 * proc (current_proc) <----- old_task (current_task, marked as TPF_DID_EXEC)
2577 * | ----------> new_task
2581 * During exec any transition from new_task -> proc is fine, but don't allow
2582 * transition from proc->task, since it will modify old_task.
2584 imgp
->ip_new_thread
= fork_create_child(current_task(),
2585 NULL
, p
, FALSE
, p
->p_flag
& P_LP64
, TRUE
);
2586 /* task and thread ref returned by fork_create_child */
2587 if (imgp
->ip_new_thread
== NULL
) {
2592 new_task
= get_threadtask(imgp
->ip_new_thread
);
2593 imgp
->ip_flags
|= IMGPF_EXEC
;
2596 if (spawn_no_exec
) {
2597 p
= (proc_t
)get_bsdthreadtask_info(imgp
->ip_new_thread
);
2600 * We had to wait until this point before firing the
2601 * proc:::create probe, otherwise p would not point to the
2604 DTRACE_PROC1(create
, proc_t
, p
);
2608 context
.vc_thread
= imgp
->ip_new_thread
;
2609 context
.vc_ucred
= p
->p_ucred
; /* XXX must NOT be kauth_cred_get() */
2612 * Post fdcopy(), pre exec_handle_sugid() - this is where we want
2613 * to handle the file_actions. Since vfork() also ends up setting
2614 * us into the parent process group, and saved off the signal flags,
2615 * this is also where we want to handle the spawn flags.
2618 /* Has spawn file actions? */
2619 if (imgp
->ip_px_sfa
!= NULL
) {
2621 * The POSIX_SPAWN_CLOEXEC_DEFAULT flag
2622 * is handled in exec_handle_file_actions().
2624 if ((error
= exec_handle_file_actions(imgp
,
2625 imgp
->ip_px_sa
!= NULL
? px_sa
.psa_flags
: 0)) != 0)
2629 /* Has spawn port actions? */
2630 if (imgp
->ip_px_spa
!= NULL
) {
2631 boolean_t is_adaptive
= FALSE
;
2632 boolean_t portwatch_present
= FALSE
;
2634 /* Will this process become adaptive? The apptype isn't ready yet, so we can't look there. */
2635 if (imgp
->ip_px_sa
!= NULL
&& px_sa
.psa_apptype
== POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE
)
2640 * Allocate a place to store the ports we want to bind to the new task
2641 * We can't bind them until after the apptype is set.
2643 if (px_spap
->pspa_count
!= 0 && is_adaptive
) {
2644 portwatch_count
= px_spap
->pspa_count
;
2645 MALLOC(portwatch_ports
, ipc_port_t
*, (sizeof(ipc_port_t
) * portwatch_count
), M_TEMP
, M_WAITOK
| M_ZERO
);
2647 portwatch_ports
= NULL
;
2650 if ((error
= exec_handle_port_actions(imgp
, &portwatch_present
, portwatch_ports
)) != 0)
2653 if (portwatch_present
== FALSE
&& portwatch_ports
!= NULL
) {
2654 FREE(portwatch_ports
, M_TEMP
);
2655 portwatch_ports
= NULL
;
2656 portwatch_count
= 0;
2660 /* Has spawn attr? */
2661 if (imgp
->ip_px_sa
!= NULL
) {
2663 * Set the process group ID of the child process; this has
2664 * to happen before the image activation.
2666 if (px_sa
.psa_flags
& POSIX_SPAWN_SETPGROUP
) {
2667 struct setpgid_args spga
;
2668 spga
.pid
= p
->p_pid
;
2669 spga
.pgid
= px_sa
.psa_pgroup
;
2671 * Effectively, call setpgid() system call; works
2672 * because there are no pointer arguments.
2674 if((error
= setpgid(p
, &spga
, ival
)) != 0)
2679 * Reset UID/GID to parent's RUID/RGID; This works only
2680 * because the operation occurs *after* the vfork() and
2681 * before the call to exec_handle_sugid() by the image
2682 * activator called from exec_activate_image(). POSIX
2683 * requires that any setuid/setgid bits on the process
2684 * image will take precedence over the spawn attributes
2687 * Modifications to p_ucred must be guarded using the
2688 * proc's ucred lock. This prevents others from accessing
2689 * a garbage credential.
2691 while (px_sa
.psa_flags
& POSIX_SPAWN_RESETIDS
) {
2692 kauth_cred_t my_cred
= kauth_cred_proc_ref(p
);
2693 kauth_cred_t my_new_cred
= kauth_cred_setuidgid(my_cred
, kauth_cred_getruid(my_cred
), kauth_cred_getrgid(my_cred
));
2695 if (my_cred
== my_new_cred
) {
2696 kauth_cred_unref(&my_cred
);
2700 /* update cred on proc */
2703 if (p
->p_ucred
!= my_cred
) {
2704 proc_ucred_unlock(p
);
2705 kauth_cred_unref(&my_new_cred
);
2709 /* donate cred reference on my_new_cred to p->p_ucred */
2710 p
->p_ucred
= my_new_cred
;
2711 PROC_UPDATE_CREDS_ONPROC(p
);
2712 proc_ucred_unlock(p
);
2714 /* drop additional reference that was taken on the previous cred */
2715 kauth_cred_unref(&my_cred
);
2719 if (spawn_no_exec
&& imgp
->ip_px_persona
!= NULL
) {
2721 * If we were asked to spawn a process into a new persona,
2722 * do the credential switch now (which may override the UID/GID
2723 * inherit done just above). It's important to do this switch
2724 * before image activation both for reasons stated above, and
2725 * to ensure that the new persona has access to the image/file
2728 error
= spawn_persona_adopt(p
, imgp
->ip_px_persona
);
2732 #endif /* CONFIG_PERSONAS */
2735 * Disable ASLR for the spawned process.
2737 * But only do so if we are not embedded + RELEASE.
2738 * While embedded allows for a boot-arg (-disable_aslr)
2739 * to deal with this (which itself is only honored on
2740 * DEVELOPMENT or DEBUG builds of xnu), it is often
2741 * useful or necessary to disable ASLR on a per-process
2742 * basis for unit testing and debugging.
2744 if (px_sa
.psa_flags
& _POSIX_SPAWN_DISABLE_ASLR
)
2745 OSBitOrAtomic(P_DISABLE_ASLR
, &p
->p_flag
);
2746 #endif /* !SECURE_KERNEL */
2748 /* Randomize high bits of ASLR slide */
2749 if (px_sa
.psa_flags
& _POSIX_SPAWN_HIGH_BITS_ASLR
)
2750 imgp
->ip_flags
|= IMGPF_HIGH_BITS_ASLR
;
2753 * Forcibly disallow execution from data pages for the spawned process
2754 * even if it would otherwise be permitted by the architecture default.
2756 if (px_sa
.psa_flags
& _POSIX_SPAWN_ALLOW_DATA_EXEC
)
2757 imgp
->ip_flags
|= IMGPF_ALLOW_DATA_EXEC
;
2761 * Disable ASLR during image activation. This occurs either if the
2762 * _POSIX_SPAWN_DISABLE_ASLR attribute was found above or if
2763 * P_DISABLE_ASLR was inherited from the parent process.
2765 if (p
->p_flag
& P_DISABLE_ASLR
)
2766 imgp
->ip_flags
|= IMGPF_DISABLE_ASLR
;
2769 * Clear transition flag so we won't hang if exec_activate_image() causes
2770 * an automount (and launchd does a proc sysctl to service it).
2772 * <rdar://problem/6848672>, <rdar://problem/5959568>.
2774 if (spawn_no_exec
) {
2775 proc_transend(p
, 0);
2776 proc_transit_set
= 0;
2779 #if MAC_SPAWN /* XXX */
2780 if (uap
->mac_p
!= USER_ADDR_NULL
) {
2781 error
= mac_execve_enter(uap
->mac_p
, imgp
);
2788 * Activate the image
2790 error
= exec_activate_image(imgp
);
2792 if (error
== 0 && !spawn_no_exec
) {
2793 p
= proc_exec_switch_task(p
, current_task(), new_task
, imgp
->ip_new_thread
);
2794 /* proc ref returned */
2795 should_release_proc_ref
= TRUE
;
2799 /* process completed the exec */
2801 } else if (error
== -1) {
2802 /* Image not claimed by any activator? */
2807 * If we have a spawn attr, and it contains signal related flags,
2808 * the we need to process them in the "context" of the new child
2809 * process, so we have to process it following image activation,
2810 * prior to making the thread runnable in user space. This is
2811 * necessitated by some signal information being per-thread rather
2812 * than per-process, and we don't have the new allocation in hand
2813 * until after the image is activated.
2815 if (!error
&& imgp
->ip_px_sa
!= NULL
) {
2816 thread_t child_thread
= imgp
->ip_new_thread
;
2817 uthread_t child_uthread
= get_bsdthread_info(child_thread
);
2820 * Mask a list of signals, instead of them being unmasked, if
2821 * they were unmasked in the parent; note that some signals
2824 if (px_sa
.psa_flags
& POSIX_SPAWN_SETSIGMASK
)
2825 child_uthread
->uu_sigmask
= (px_sa
.psa_sigmask
& ~sigcantmask
);
2827 * Default a list of signals instead of ignoring them, if
2828 * they were ignored in the parent. Note that we pass
2829 * spawn_no_exec to setsigvec() to indicate that we called
2830 * fork1() and therefore do not need to call proc_signalstart()
2833 if (px_sa
.psa_flags
& POSIX_SPAWN_SETSIGDEF
) {
2834 vec
.sa_handler
= SIG_DFL
;
2838 for (sig
= 1; sig
< NSIG
; sig
++)
2839 if (px_sa
.psa_sigdefault
& (1 << (sig
-1))) {
2840 error
= setsigvec(p
, child_thread
, sig
, &vec
, spawn_no_exec
);
2845 * Activate the CPU usage monitor, if requested. This is done via a task-wide, per-thread CPU
2846 * usage limit, which will generate a resource exceeded exception if any one thread exceeds the
2849 * Userland gives us interval in seconds, and the kernel SPI expects nanoseconds.
2851 if (px_sa
.psa_cpumonitor_percent
!= 0) {
2853 * Always treat a CPU monitor activation coming from spawn as entitled. Requiring
2854 * an entitlement to configure the monitor a certain way seems silly, since
2855 * whomever is turning it on could just as easily choose not to do so.
2857 error
= proc_set_task_ruse_cpu(p
->task
,
2858 TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC
,
2859 px_sa
.psa_cpumonitor_percent
,
2860 px_sa
.psa_cpumonitor_interval
* NSEC_PER_SEC
,
2868 /* reset delay idle sleep status if set */
2869 #if !CONFIG_EMBEDDED
2870 if ((p
->p_flag
& P_DELAYIDLESLEEP
) == P_DELAYIDLESLEEP
)
2871 OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP
), &p
->p_flag
);
2872 #endif /* !CONFIG_EMBEDDED */
2873 /* upon successful spawn, re/set the proc control state */
2874 if (imgp
->ip_px_sa
!= NULL
) {
2875 switch (px_sa
.psa_pcontrol
) {
2876 case POSIX_SPAWN_PCONTROL_THROTTLE
:
2877 p
->p_pcaction
= P_PCTHROTTLE
;
2879 case POSIX_SPAWN_PCONTROL_SUSPEND
:
2880 p
->p_pcaction
= P_PCSUSP
;
2882 case POSIX_SPAWN_PCONTROL_KILL
:
2883 p
->p_pcaction
= P_PCKILL
;
2885 case POSIX_SPAWN_PCONTROL_NONE
:
2891 exec_resettextvp(p
, imgp
);
2893 #if CONFIG_MEMORYSTATUS
2894 /* Has jetsam attributes? */
2895 if (imgp
->ip_px_sa
!= NULL
&& (px_sa
.psa_jetsam_flags
& POSIX_SPAWN_JETSAM_SET
)) {
2897 * With 2-level high-water-mark support, POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is no
2898 * longer relevant, as background limits are described via the inactive limit slots.
2900 * That said, however, if the POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is passed in,
2901 * we attempt to mimic previous behavior by forcing the BG limit data into the
2902 * inactive/non-fatal mode and force the active slots to hold system_wide/fatal mode.
2904 if (px_sa
.psa_jetsam_flags
& POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND
) {
2905 memorystatus_update(p
, px_sa
.psa_priority
, 0,
2906 (px_sa
.psa_jetsam_flags
& POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY
),
2909 px_sa
.psa_memlimit_inactive
, FALSE
);
2911 memorystatus_update(p
, px_sa
.psa_priority
, 0,
2912 (px_sa
.psa_jetsam_flags
& POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY
),
2914 px_sa
.psa_memlimit_active
,
2915 (px_sa
.psa_jetsam_flags
& POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL
),
2916 px_sa
.psa_memlimit_inactive
,
2917 (px_sa
.psa_jetsam_flags
& POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL
));
2921 #endif /* CONFIG_MEMORYSTATUS */
2925 * If we successfully called fork1(), we always need to do this;
2926 * we identify this case by noting the IMGPF_SPAWN flag. This is
2927 * because we come back from that call with signals blocked in the
2928 * child, and we have to unblock them, but we want to wait until
2929 * after we've performed any spawn actions. This has to happen
2930 * before check_for_signature(), which uses psignal.
2932 if (spawn_no_exec
) {
2933 if (proc_transit_set
)
2934 proc_transend(p
, 0);
2937 * Drop the signal lock on the child which was taken on our
2938 * behalf by forkproc()/cloneproc() to prevent signals being
2939 * received by the child in a partially constructed state.
2941 proc_signalend(p
, 0);
2943 /* flag the 'fork' has occurred */
2944 proc_knote(p
->p_pptr
, NOTE_FORK
| p
->p_pid
);
2947 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
2948 if (!error
&& ((p
->p_lflag
& P_LTERM_DECRYPTFAIL
) == 0))
2949 proc_knote(p
, NOTE_EXEC
);
2954 * We need to initialize the bank context behind the protection of
2955 * the proc_trans lock to prevent a race with exit. We can't do this during
2956 * exec_activate_image because task_bank_init checks entitlements that
2957 * aren't loaded until subsequent calls (including exec_resettextvp).
2959 error
= proc_transstart(p
, 0, 0);
2962 task_bank_init(get_threadtask(imgp
->ip_new_thread
));
2963 proc_transend(p
, 0);
2967 /* Inherit task role from old task to new task for exec */
2968 if (error
== 0 && !spawn_no_exec
) {
2969 proc_inherit_task_role(get_threadtask(imgp
->ip_new_thread
), current_task());
2973 * Apply the spawnattr policy, apptype (which primes the task for importance donation),
2974 * and bind any portwatch ports to the new task.
2975 * This must be done after the exec so that the child's thread is ready,
2976 * and after the in transit state has been released, because priority is
2977 * dropped here so we need to be prepared for a potentially long preemption interval
2979 * TODO: Consider splitting this up into separate phases
2981 if (error
== 0 && imgp
->ip_px_sa
!= NULL
) {
2982 struct _posix_spawnattr
*psa
= (struct _posix_spawnattr
*) imgp
->ip_px_sa
;
2984 exec_handle_spawnattr_policy(p
, psa
->psa_apptype
, psa
->psa_qos_clamp
, psa
->psa_darwin_role
,
2985 portwatch_ports
, portwatch_count
);
2989 * Need to transfer pending watch port boosts to the new task while still making
2990 * sure that the old task remains in the importance linkage. Create an importance
2991 * linkage from old task to new task, then switch the task importance base
2992 * of old task and new task. After the switch the port watch boost will be
2993 * boosting the new task and new task will be donating importance to old task.
2995 if (error
== 0 && task_did_exec(current_task())) {
2996 inherit
= ipc_importance_exec_switch_task(current_task(), get_threadtask(imgp
->ip_new_thread
));
3000 /* Apply the main thread qos */
3001 thread_t main_thread
= imgp
->ip_new_thread
;
3002 task_set_main_thread_qos(get_threadtask(imgp
->ip_new_thread
), main_thread
);
3006 * Processes with the MAP_JIT entitlement are permitted to have
3009 if (mac_proc_check_map_anon(p
, 0, 0, 0, MAP_JIT
, NULL
) == 0) {
3010 vm_map_set_jumbo(get_task_map(p
->task
));
3012 #endif /* CONFIG_MACF */
3016 * Release any ports we kept around for binding to the new task
3017 * We need to release the rights even if the posix_spawn has failed.
3019 if (portwatch_ports
!= NULL
) {
3020 for (int i
= 0; i
< portwatch_count
; i
++) {
3021 ipc_port_t port
= NULL
;
3022 if ((port
= portwatch_ports
[i
]) != NULL
) {
3023 ipc_port_release_send(port
);
3026 FREE(portwatch_ports
, M_TEMP
);
3027 portwatch_ports
= NULL
;
3028 portwatch_count
= 0;
3032 * We have to delay operations which might throw a signal until after
3033 * the signals have been unblocked; however, we want that to happen
3034 * after exec_resettextvp() so that the textvp is correct when they
3038 error
= check_for_signature(p
, imgp
);
3041 * Pay for our earlier safety; deliver the delayed signals from
3042 * the incomplete spawn process now that it's complete.
3044 if (imgp
!= NULL
&& spawn_no_exec
&& (p
->p_lflag
& P_LTRACED
)) {
3045 psignal_vfork(p
, p
->task
, imgp
->ip_new_thread
, SIGTRAP
);
3048 if (error
== 0 && !spawn_no_exec
)
3049 KDBG(BSDDBG_CODE(DBG_BSD_PROC
,BSD_PROC_EXEC
),
3056 vnode_put(imgp
->ip_vp
);
3057 if (imgp
->ip_scriptvp
)
3058 vnode_put(imgp
->ip_scriptvp
);
3059 if (imgp
->ip_strings
)
3060 execargs_free(imgp
);
3061 if (imgp
->ip_px_sfa
!= NULL
)
3062 FREE(imgp
->ip_px_sfa
, M_TEMP
);
3063 if (imgp
->ip_px_spa
!= NULL
)
3064 FREE(imgp
->ip_px_spa
, M_TEMP
);
3066 if (imgp
->ip_px_persona
!= NULL
)
3067 FREE(imgp
->ip_px_persona
, M_TEMP
);
3070 if (imgp
->ip_px_smpx
!= NULL
)
3071 spawn_free_macpolicyinfo(imgp
->ip_px_smpx
);
3072 if (imgp
->ip_execlabelp
)
3073 mac_cred_label_free(imgp
->ip_execlabelp
);
3074 if (imgp
->ip_scriptlabelp
)
3075 mac_vnode_label_free(imgp
->ip_scriptlabelp
);
3076 if (imgp
->ip_cs_error
!= OS_REASON_NULL
) {
3077 os_reason_free(imgp
->ip_cs_error
);
3078 imgp
->ip_cs_error
= OS_REASON_NULL
;
3084 if (spawn_no_exec
) {
3086 * In the original DTrace reference implementation,
3087 * posix_spawn() was a libc routine that just
3088 * did vfork(2) then exec(2). Thus the proc::: probes
3089 * are very fork/exec oriented. The details of this
3090 * in-kernel implementation of posix_spawn() is different
3091 * (while producing the same process-observable effects)
3092 * particularly w.r.t. errors, and which thread/process
3093 * is constructing what on behalf of whom.
3096 DTRACE_PROC1(spawn__failure
, int, error
);
3098 DTRACE_PROC(spawn__success
);
3100 * Some DTrace scripts, e.g. newproc.d in
3101 * /usr/bin, rely on the 'exec-success'
3102 * probe being fired in the child after the
3103 * new process image has been constructed
3104 * in order to determine the associated pid.
3106 * So, even though the parent built the image
3107 * here, for compatibility, mark the new thread
3108 * so 'exec-success' fires on it as it leaves
3111 dtrace_thread_didexec(imgp
->ip_new_thread
);
3115 DTRACE_PROC1(exec__failure
, int, error
);
3117 dtrace_thread_didexec(imgp
->ip_new_thread
);
3121 if ((dtrace_proc_waitfor_hook
= dtrace_proc_waitfor_exec_ptr
) != NULL
) {
3122 (*dtrace_proc_waitfor_hook
)(p
);
3126 * clear bsd_info from old task if it did exec.
3128 if (task_did_exec(current_task())) {
3129 set_bsdtask_info(current_task(), NULL
);
3132 /* clear bsd_info from new task and terminate it if exec failed */
3133 if (new_task
!= NULL
&& task_is_exec_copy(new_task
)) {
3134 set_bsdtask_info(new_task
, NULL
);
3135 task_terminate_internal(new_task
);
3138 /* Return to both the parent and the child? */
3139 if (imgp
!= NULL
&& spawn_no_exec
) {
3141 * If the parent wants the pid, copy it out
3143 if (pid
!= USER_ADDR_NULL
)
3144 (void)suword(pid
, p
->p_pid
);
3148 * If we had an error, perform an internal reap ; this is
3149 * entirely safe, as we have a real process backing us.
3153 p
->p_listflag
|= P_LIST_DEADPARENT
;
3156 /* make sure no one else has killed it off... */
3157 if (p
->p_stat
!= SZOMB
&& p
->exit_thread
== NULL
) {
3158 p
->exit_thread
= current_thread();
3160 exit1(p
, 1, (int *)NULL
);
3162 /* someone is doing it for us; just skip it */
3169 * Do not terminate the current task, if proc_exec_switch_task did not
3170 * switch the tasks, terminating the current task without the switch would
3171 * result in losing the SIGKILL status.
3173 if (task_did_exec(current_task())) {
3174 /* Terminate the current task, since exec will start in new task */
3175 task_terminate_internal(current_task());
3178 /* Release the thread ref returned by fork_create_child/fork1 */
3179 if (imgp
!= NULL
&& imgp
->ip_new_thread
) {
3180 /* wake up the new thread */
3181 task_clear_return_wait(get_threadtask(imgp
->ip_new_thread
));
3182 thread_deallocate(imgp
->ip_new_thread
);
3183 imgp
->ip_new_thread
= NULL
;
3186 /* Release the ref returned by fork_create_child/fork1 */
3188 task_deallocate(new_task
);
3192 if (should_release_proc_ref
) {
3200 if (inherit
!= NULL
) {
3201 ipc_importance_release(inherit
);
3208 * proc_exec_switch_task
3210 * Parameters: p proc
3211 * old_task task before exec
3212 * new_task task after exec
3213 * new_thread thread in new task
3217 * Note: The function will switch the task pointer of proc
3218 * from old task to new task. The switch needs to happen
3219 * after draining all proc refs and inside a proc translock.
3220 * In the case of failure to switch the task, which might happen
3221 * if the process received a SIGKILL or jetsam killed it, it will make
3222 * sure that the new tasks terminates. User proc ref returned
3225 * This function is called after point of no return, in the case
3226 * failure to switch, it will terminate the new task and swallow the
3227 * error and let the terminated process complete exec and die.
/*
 * Switch proc 'p' from old_task to new_task after exec reaches the point of
 * no return; an extra proc ref is returned to the caller (see
 * proc_refdrain_with_refwait below).
 */
3230 proc_exec_switch_task(proc_t p
, task_t old_task
, task_t new_task
, thread_t new_thread
)
3233 boolean_t task_active
;
3234 boolean_t proc_active
;
3235 boolean_t thread_active
;
3236 thread_t old_thread
= current_thread();
3239 * Switch the task pointer of proc to new task.
3240 * Before switching the task, wait for proc_refdrain.
3241 * After the switch happens, the proc can disappear,
3242 * take a ref before it disappears. Waiting for
3243 * proc_refdrain in exec will block all other threads
3244 * trying to take a proc ref, boost the current thread
3245 * to avoid priority inversion.
3247 thread_set_exec_promotion(old_thread
);
3248 p
= proc_refdrain_with_refwait(p
, TRUE
);
3249 /* extra proc ref returned to the caller */
3251 assert(get_threadtask(new_thread
) == new_task
);
3252 task_active
= task_is_active(new_task
);
3254 /* Take the proc_translock to change the task ptr */
3256 proc_active
= !(p
->p_lflag
& P_LEXIT
);
/*
 * NOTE(review): per the comment above, this region runs under the proc
 * lock/translock; the lock and unlock calls are not visible in this
 * extract -- confirm against the full source.
 */
3258 /* Check if the current thread is not aborted due to SIGKILL */
3259 thread_active
= thread_is_active(old_thread
);
3262 * Do not switch the task if the new task or proc is already terminated
3263 * as a result of error in exec past point of no return
3265 if (proc_active
&& task_active
&& thread_active
) {
3266 error
= proc_transstart(p
, 1, 0);
/*
 * NOTE(review): the uthread/signal/DTrace state transfer below presumably
 * runs only when proc_transstart() succeeded -- the guarding branch is not
 * visible in this extract; confirm.
 */
3268 uthread_t new_uthread
= get_bsdthread_info(new_thread
);
3269 uthread_t old_uthread
= get_bsdthread_info(current_thread());
3272 * bsd_info of old_task will get cleared in execve and posix_spawn
3273 * after firing exec-success/error dtrace probe.
3277 /* Clear dispatchqueue and workloop ast offset */
3278 p
->p_dispatchqueue_offset
= 0;
3279 p
->p_dispatchqueue_serialno_offset
= 0;
3280 p
->p_return_to_kernel_offset
= 0;
3282 /* Copy the signal state, dtrace state and set bsd ast on new thread */
3283 act_set_astbsd(new_thread
);
3284 new_uthread
->uu_siglist
= old_uthread
->uu_siglist
;
3285 new_uthread
->uu_sigwait
= old_uthread
->uu_sigwait
;
3286 new_uthread
->uu_sigmask
= old_uthread
->uu_sigmask
;
3287 new_uthread
->uu_oldmask
= old_uthread
->uu_oldmask
;
3288 new_uthread
->uu_vforkmask
= old_uthread
->uu_vforkmask
;
3289 new_uthread
->uu_exit_reason
= old_uthread
->uu_exit_reason
;
/*
 * Migrate pending DTrace signal/stop bookkeeping to the new thread;
 * presumably compiled only under CONFIG_DTRACE -- the conditional is not
 * visible in this extract, confirm.
 */
3291 new_uthread
->t_dtrace_sig
= old_uthread
->t_dtrace_sig
;
3292 new_uthread
->t_dtrace_stop
= old_uthread
->t_dtrace_stop
;
3293 new_uthread
->t_dtrace_resumepid
= old_uthread
->t_dtrace_resumepid
;
3294 assert(new_uthread
->t_dtrace_scratch
== NULL
);
3295 new_uthread
->t_dtrace_scratch
= old_uthread
->t_dtrace_scratch
;
/* Old thread must no longer own the migrated DTrace state. */
3297 old_uthread
->t_dtrace_sig
= 0;
3298 old_uthread
->t_dtrace_stop
= 0;
3299 old_uthread
->t_dtrace_resumepid
= 0;
3300 old_uthread
->t_dtrace_scratch
= NULL
;
3302 /* Copy the resource accounting info */
3303 thread_copy_resource_info(new_thread
, current_thread());
3305 /* Clear the exit reason and signal state on old thread */
3306 old_uthread
->uu_exit_reason
= NULL
;
3307 old_uthread
->uu_siglist
= 0;
3309 /* Add the new uthread to proc uthlist and remove the old one */
3310 TAILQ_INSERT_TAIL(&p
->p_uthlist
, new_uthread
, uu_list
);
3311 TAILQ_REMOVE(&p
->p_uthlist
, old_uthread
, uu_list
);
3313 task_set_did_exec_flag(old_task
);
3314 task_clear_exec_copy_flag(new_task
);
/*
 * Mark the old task as having exec'd and drop the exec-copy marker on the
 * new task, then copy inheritable task fields from old to new.
 */
3316 task_copy_fields_for_exec(new_task
, old_task
);
3318 proc_transend(p
, 1);
3324 thread_clear_exec_promotion(old_thread
);
3326 if (error
!= 0 || !task_active
|| !proc_active
|| !thread_active
) {
/*
 * Exec failed past the point of no return: terminate the new task and let
 * the dying process complete exec and exit (see header comment above).
 */
3327 task_terminate_internal(new_task
);
3336 * Parameters: uap->fname File name to exec
3337 * uap->argp Argument list
3338 * uap->envp Environment list
3340 * Returns: 0 Success
3341 * __mac_execve:EINVAL Invalid argument
3342 * __mac_execve:ENOTSUP Invalid argument
3343 * __mac_execve:EACCES Permission denied
3344 * __mac_execve:EINTR Interrupted function
3345 * __mac_execve:ENOMEM Not enough space
3346 * __mac_execve:EFAULT Bad address
3347 * __mac_execve:ENAMETOOLONG Filename too long
3348 * __mac_execve:ENOEXEC Executable file format error
3349 * __mac_execve:ETXTBSY Text file busy [misuse of error code]
3352 * TODO: Dynamic linker header address on stack is copied via suword()
3356 execve(proc_t p
, struct execve_args
*uap
, int32_t *retval
)
3358 struct __mac_execve_args muap
;
3361 memoryshot(VM_EXECVE
, DBG_FUNC_NONE
);
3363 muap
.fname
= uap
->fname
;
3364 muap
.argp
= uap
->argp
;
3365 muap
.envp
= uap
->envp
;
3366 muap
.mac_p
= USER_ADDR_NULL
;
3367 err
= __mac_execve(p
, &muap
, retval
);
3375 * Parameters: uap->fname File name to exec
3376 * uap->argp Argument list
3377 * uap->envp Environment list
3378 * uap->mac_p MAC label supplied by caller
3380 * Returns: 0 Success
3381 * EINVAL Invalid argument
3382 * ENOTSUP Not supported
3383 * ENOEXEC Executable file format error
3384 * exec_activate_image:EINVAL Invalid argument
3385 * exec_activate_image:EACCES Permission denied
3386 * exec_activate_image:EINTR Interrupted function
3387 * exec_activate_image:ENOMEM Not enough space
3388 * exec_activate_image:EFAULT Bad address
3389 * exec_activate_image:ENAMETOOLONG Filename too long
3390 * exec_activate_image:ENOEXEC Executable file format error
3391 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
3392 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
3393 * exec_activate_image:???
3394 * mac_execve_enter:???
3396 * TODO: Dynamic linker header address on stack is copied via suword()
3399 __mac_execve(proc_t p
, struct __mac_execve_args
*uap
, int32_t *retval
)
3402 struct image_params
*imgp
;
3403 struct vnode_attr
*vap
;
3404 struct vnode_attr
*origvap
;
3406 int is_64
= IS_64BIT_PROCESS(p
);
3407 struct vfs_context context
;
3408 struct uthread
*uthread
;
3409 task_t new_task
= NULL
;
3410 boolean_t should_release_proc_ref
= FALSE
;
3411 boolean_t exec_done
= FALSE
;
3412 boolean_t in_vfexec
= FALSE
;
3413 void *inherit
= NULL
;
3415 context
.vc_thread
= current_thread();
3416 context
.vc_ucred
= kauth_cred_proc_ref(p
); /* XXX must NOT be kauth_cred_get() */
3418 /* Allocate a big chunk for locals instead of using stack since these
3419 * structures are pretty big.
3421 MALLOC(bufp
, char *, (sizeof(*imgp
) + sizeof(*vap
) + sizeof(*origvap
)), M_TEMP
, M_WAITOK
| M_ZERO
);
3422 imgp
= (struct image_params
*) bufp
;
3425 goto exit_with_error
;
3427 vap
= (struct vnode_attr
*) (bufp
+ sizeof(*imgp
));
3428 origvap
= (struct vnode_attr
*) (bufp
+ sizeof(*imgp
) + sizeof(*vap
));
3430 /* Initialize the common data in the image_params structure */
3431 imgp
->ip_user_fname
= uap
->fname
;
3432 imgp
->ip_user_argv
= uap
->argp
;
3433 imgp
->ip_user_envv
= uap
->envp
;
3434 imgp
->ip_vattr
= vap
;
3435 imgp
->ip_origvattr
= origvap
;
3436 imgp
->ip_vfs_context
= &context
;
3437 imgp
->ip_flags
= (is_64
? IMGPF_WAS_64BIT
: IMGPF_NONE
) | ((p
->p_flag
& P_DISABLE_ASLR
) ? IMGPF_DISABLE_ASLR
: IMGPF_NONE
);
3438 imgp
->ip_seg
= (is_64
? UIO_USERSPACE64
: UIO_USERSPACE32
);
3439 imgp
->ip_mac_return
= 0;
3440 imgp
->ip_cs_error
= OS_REASON_NULL
;
3443 if (uap
->mac_p
!= USER_ADDR_NULL
) {
3444 error
= mac_execve_enter(uap
->mac_p
, imgp
);
3446 kauth_cred_unref(&context
.vc_ucred
);
3447 goto exit_with_error
;
3451 uthread
= get_bsdthread_info(current_thread());
3452 if (uthread
->uu_flag
& UT_VFORK
) {
3453 imgp
->ip_flags
|= IMGPF_VFORK_EXEC
;
3456 imgp
->ip_flags
|= IMGPF_EXEC
;
3459 * For execve case, create a new task and thread
3460 * which points to current_proc. The current_proc will point
3461 * to the new task after image activation and proc ref drain.
3463 * proc (current_proc) <----- old_task (current_task)
3466 * | ----------------------------------
3468 * --------- new_task (task marked as TF_EXEC_COPY)
3470 * After image activation, the proc will point to the new task
3471 * and would look like following.
3473 * proc (current_proc) <----- old_task (current_task, marked as TPF_DID_EXEC)
3476 * | ----------> new_task
3480 * During exec any transition from new_task -> proc is fine, but don't allow
3481 * transition from proc->task, since it will modify old_task.
3483 imgp
->ip_new_thread
= fork_create_child(current_task(),
3484 NULL
, p
, FALSE
, p
->p_flag
& P_LP64
, TRUE
);
3485 /* task and thread ref returned by fork_create_child */
3486 if (imgp
->ip_new_thread
== NULL
) {
3488 goto exit_with_error
;
3491 new_task
= get_threadtask(imgp
->ip_new_thread
);
3492 context
.vc_thread
= imgp
->ip_new_thread
;
3495 error
= exec_activate_image(imgp
);
3496 /* thread and task ref returned for vfexec case */
3498 if (imgp
->ip_new_thread
!= NULL
) {
3500 * task reference might be returned by exec_activate_image
3503 new_task
= get_threadtask(imgp
->ip_new_thread
);
3506 if (!error
&& !in_vfexec
) {
3507 p
= proc_exec_switch_task(p
, current_task(), new_task
, imgp
->ip_new_thread
);
3508 /* proc ref returned */
3509 should_release_proc_ref
= TRUE
;
3512 kauth_cred_unref(&context
.vc_ucred
);
3514 /* Image not claimed by any activator? */
3520 assert(imgp
->ip_new_thread
!= NULL
);
3522 exec_resettextvp(p
, imgp
);
3523 error
= check_for_signature(p
, imgp
);
3526 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
3527 if (exec_done
&& ((p
->p_lflag
& P_LTERM_DECRYPTFAIL
) == 0))
3528 proc_knote(p
, NOTE_EXEC
);
3530 if (imgp
->ip_vp
!= NULLVP
)
3531 vnode_put(imgp
->ip_vp
);
3532 if (imgp
->ip_scriptvp
!= NULLVP
)
3533 vnode_put(imgp
->ip_scriptvp
);
3534 if (imgp
->ip_strings
)
3535 execargs_free(imgp
);
3537 if (imgp
->ip_execlabelp
)
3538 mac_cred_label_free(imgp
->ip_execlabelp
);
3539 if (imgp
->ip_scriptlabelp
)
3540 mac_vnode_label_free(imgp
->ip_scriptlabelp
);
3542 if (imgp
->ip_cs_error
!= OS_REASON_NULL
) {
3543 os_reason_free(imgp
->ip_cs_error
);
3544 imgp
->ip_cs_error
= OS_REASON_NULL
;
3549 * We need to initialize the bank context behind the protection of
3550 * the proc_trans lock to prevent a race with exit. We can't do this during
3551 * exec_activate_image because task_bank_init checks entitlements that
3552 * aren't loaded until subsequent calls (including exec_resettextvp).
3554 error
= proc_transstart(p
, 0, 0);
3558 task_bank_init(get_threadtask(imgp
->ip_new_thread
));
3559 proc_transend(p
, 0);
3561 /* Sever any extant thread affinity */
3562 thread_affinity_exec(current_thread());
3564 /* Inherit task role from old task to new task for exec */
3566 proc_inherit_task_role(get_threadtask(imgp
->ip_new_thread
), current_task());
3569 thread_t main_thread
= imgp
->ip_new_thread
;
3571 task_set_main_thread_qos(new_task
, main_thread
);
3575 * Processes with the MAP_JIT entitlement are permitted to have
3578 if (mac_proc_check_map_anon(p
, 0, 0, 0, MAP_JIT
, NULL
) == 0) {
3579 vm_map_set_jumbo(get_task_map(new_task
));
3581 #endif /* CONFIG_MACF */
3585 dtrace_thread_didexec(imgp
->ip_new_thread
);
3587 if ((dtrace_proc_waitfor_hook
= dtrace_proc_waitfor_exec_ptr
) != NULL
)
3588 (*dtrace_proc_waitfor_hook
)(p
);
3592 vfork_return(p
, retval
, p
->p_pid
);
3595 DTRACE_PROC1(exec__failure
, int, error
);
3601 * clear bsd_info from old task if it did exec.
3603 if (task_did_exec(current_task())) {
3604 set_bsdtask_info(current_task(), NULL
);
3607 /* clear bsd_info from new task and terminate it if exec failed */
3608 if (new_task
!= NULL
&& task_is_exec_copy(new_task
)) {
3609 set_bsdtask_info(new_task
, NULL
);
3610 task_terminate_internal(new_task
);
3614 * Need to transfer pending watch port boosts to the new task while still making
3615 * sure that the old task remains in the importance linkage. Create an importance
3616 * linkage from old task to new task, then switch the task importance base
3617 * of old task and new task. After the switch the port watch boost will be
3618 * boosting the new task and new task will be donating importance to old task.
3620 if (error
== 0 && task_did_exec(current_task())) {
3621 inherit
= ipc_importance_exec_switch_task(current_task(), get_threadtask(imgp
->ip_new_thread
));
3626 * Do not terminate the current task, if proc_exec_switch_task did not
3627 * switch the tasks, terminating the current task without the switch would
3628 * result in loosing the SIGKILL status.
3630 if (task_did_exec(current_task())) {
3631 /* Terminate the current task, since exec will start in new task */
3632 task_terminate_internal(current_task());
3635 /* Release the thread ref returned by fork_create_child */
3636 if (imgp
->ip_new_thread
) {
3637 /* wake up the new exec thread */
3638 task_clear_return_wait(get_threadtask(imgp
->ip_new_thread
));
3639 thread_deallocate(imgp
->ip_new_thread
);
3640 imgp
->ip_new_thread
= NULL
;
3644 /* Release the ref returned by fork_create_child */
3646 task_deallocate(new_task
);
3650 if (should_release_proc_ref
) {
3658 if (inherit
!= NULL
) {
3659 ipc_importance_release(inherit
);
3669 * Description: Copy a pointer in from user space to a user_addr_t in kernel
3670 * space, based on 32/64 bitness of the user space
3672 * Parameters: froma User space address
3673 * toptr Address of kernel space user_addr_t
3674 * ptr_size 4/8, based on 'froma' address space
3676 * Returns: 0 Success
3677 * EFAULT Bad 'froma'
3680 * *toptr Modified
3683 copyinptr(user_addr_t froma
, user_addr_t
*toptr
, int ptr_size
)
3687 if (ptr_size
== 4) {
3688 /* 64 bit value containing 32 bit address */
3691 error
= copyin(froma
, &i
, 4);
3692 *toptr
= CAST_USER_ADDR_T(i
); /* SAFE */
3694 error
= copyin(froma
, toptr
, 8);
3703 * Description: Copy a pointer out from a user_addr_t in kernel space to
3704 * user space, based on 32/64 bitness of the user space
3706 * Parameters: ua The user_addr_t pointer value to copy out
3707 * ptr User space address to store it at
3708 * ptr_size 4/8, based on the target address space
3710 * Returns: 0 Success
3715 copyoutptr(user_addr_t ua
, user_addr_t ptr
, int ptr_size
)
3719 if (ptr_size
== 4) {
3720 /* 64 bit value containing 32 bit address */
3721 unsigned int i
= CAST_DOWN_EXPLICIT(unsigned int,ua
); /* SAFE */
3723 error
= copyout(&i
, ptr
, 4);
3725 error
= copyout(&ua
, ptr
, 8);
3732 * exec_copyout_strings
3734 * Copy out the strings segment to user space. The strings segment is put
3735 * on a preinitialized stack frame.
3737 * Parameters: struct image_params * the image parameter block
3738 * int * a pointer to the stack offset variable
3740 * Returns: 0 Success
3744 * (*stackp) The stack offset, modified
3746 * Note: The strings segment layout is backward, from the beginning
3747 * of the top of the stack to consume the minimal amount of
3748 * space possible; the returned stack pointer points to the
3749 * end of the area consumed (stacks grow downward).
3751 * argc is an int; arg[i] are pointers; env[i] are pointers;
3752 * the 0's are (void *)NULL's
3754 * The stack frame layout is:
3756 * +-------------+ <- p->user_stack
3797 * sp-> +-------------+
3799 * Although technically a part of the STRING AREA, we treat the PATH AREA as
3800 * a separate entity. This allows us to align the beginning of the PATH AREA
3801 * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers
3802 * which precede it on the stack are properly aligned.
/*
 * NOTE(review): this region was damaged in extraction -- each original
 * source line is split across several display lines and many statements
 * (braces, declarations, alignment math, error checks) are missing.
 * The comments below annotate only the visible fragments; do not treat
 * this text as compilable C.
 */
/*
 * Copies the accumulated strings segment (exec_path, argv, envv, applev)
 * plus the pointer arrays and argc onto the new process stack; on success
 * *stackp is moved down to the final initial stack pointer.
 */
3806 exec_copyout_strings(struct image_params
*imgp
, user_addr_t
*stackp
)
3808 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
/* Pointer width of the NEW image (IMGPF_IS_64BIT), not the caller's. */
3809 int ptr_size
= (imgp
->ip_flags
& IMGPF_IS_64BIT
) ? 8 : 4;
3811 void *ptr_buffer_start
, *ptr_buffer
;
3814 user_addr_t string_area
; /* *argv[], *env[] */
3815 user_addr_t ptr_area
; /* argv[], env[], applev[] */
3816 user_addr_t argc_area
; /* argc */
/*
 * Table describing the string sub-areas to emit pointers for; fields
 * visible here: start_string, count, dtrace_cookie, null_term.
 * Some initializer lines are missing from this extraction.
 */
3821 struct copyout_desc
{
3825 user_addr_t
*dtrace_cookie
;
3827 boolean_t null_term
;
/* argv segment: starts at ip_startargv, ip_argc entries. */
3830 .start_string
= imgp
->ip_startargv
,
3831 .count
= imgp
->ip_argc
,
3833 .dtrace_cookie
= &p
->p_dtrace_argv
,
/* envv segment: starts where argv ended. */
3838 .start_string
= imgp
->ip_endargv
,
3839 .count
= imgp
->ip_envc
,
3841 .dtrace_cookie
= &p
->p_dtrace_envp
,
/* exec_path pseudo-segment at the very start of the strings buffer. */
3846 .start_string
= imgp
->ip_strings
,
3849 .dtrace_cookie
= NULL
,
/* applev segment: starts where envv ended. */
3854 .start_string
= imgp
->ip_endenvv
,
3855 .count
= imgp
->ip_applec
- 1, /* exec_path handled above */
3857 .dtrace_cookie
= NULL
,
3866 * All previous contributors to the string area
3867 * should have aligned their sub-area
3869 if (imgp
->ip_strspace
% ptr_size
!= 0) {
3874 /* Grow the stack down for the strings we've been building up */
3875 string_size
= imgp
->ip_strendp
- imgp
->ip_strings
;
3876 stack
-= string_size
;
3877 string_area
= stack
;
3880 * Need room for one pointer for each string, plus
3881 * one for the NULLs terminating the argv, envv, and apple areas.
3883 ptr_area_size
= (imgp
->ip_argc
+ imgp
->ip_envc
+ imgp
->ip_applec
+ 3) *
3885 stack
-= ptr_area_size
;
3888 /* We'll construct all the pointer arrays in our string buffer,
3889 * which we already know is aligned properly, and ip_argspace
3890 * was used to verify we have enough space.
3892 ptr_buffer_start
= ptr_buffer
= (void *)imgp
->ip_strendp
;
3895 * Need room for pointer-aligned argc slot.
3901 * Record the size of the arguments area so that sysctl_procargs()
3902 * can return the argument area without having to parse the arguments.
3905 p
->p_argc
= imgp
->ip_argc
;
3906 p
->p_argslen
= (int)(*stackp
- string_area
);
3909 /* Return the initial stack address: the location of argc */
3913 * Copy out the entire strings area.
3915 error
= copyout(imgp
->ip_strings
, string_area
,
/*
 * For each descriptor, emit one user pointer per string; the strings
 * themselves were already copied out above in one bulk copyout().
 */
3920 for (i
= 0; i
< sizeof(descriptors
)/sizeof(descriptors
[0]); i
++) {
3921 char *cur_string
= descriptors
[i
].start_string
;
3925 if (descriptors
[i
].dtrace_cookie
) {
/* Records where this segment's pointer array will land in user space. */
3927 *descriptors
[i
].dtrace_cookie
= ptr_area
+ ((uintptr_t)ptr_buffer
- (uintptr_t)ptr_buffer_start
); /* dtrace convenience */
3930 #endif /* CONFIG_DTRACE */
3933 * For each segment (argv, envv, applev), copy as many pointers as requested
3934 * to our pointer buffer.
3936 for (j
= 0; j
< descriptors
[i
].count
; j
++) {
/* Translate a kernel buffer offset into the user string_area address. */
3937 user_addr_t cur_address
= string_area
+ (cur_string
- imgp
->ip_strings
);
3939 /* Copy out the pointer to the current string. Alignment has been verified */
3940 if (ptr_size
== 8) {
3941 *(uint64_t *)ptr_buffer
= (uint64_t)cur_address
;
3943 *(uint32_t *)ptr_buffer
= (uint32_t)cur_address
;
3946 ptr_buffer
= (void *)((uintptr_t)ptr_buffer
+ ptr_size
);
3947 cur_string
+= strlen(cur_string
) + 1; /* Only a NUL between strings in the same area */
/* Terminating NULL pointer after argv/envv/applev when requested. */
3950 if (descriptors
[i
].null_term
) {
3951 if (ptr_size
== 8) {
3952 *(uint64_t *)ptr_buffer
= 0ULL;
3954 *(uint32_t *)ptr_buffer
= 0;
3957 ptr_buffer
= (void *)((uintptr_t)ptr_buffer
+ ptr_size
);
3962 * Copy out all our pointer arrays in bulk.
3964 error
= copyout(ptr_buffer_start
, ptr_area
,
3969 /* argc (int32, stored in a ptr_size area) */
3970 error
= copyoutptr((user_addr_t
)imgp
->ip_argc
, argc_area
, ptr_size
);
3980 * exec_extract_strings
3982 * Copy arguments and environment from user space into work area; we may
3983 * have already copied some early arguments into the work area, and if
3984 * so, any arguments copied in are appended to those already there.
3985 * This function is the primary manipulator of ip_argspace, since
3986 * these are the arguments the client of execve(2) knows about. After
3987 * each argv[]/envv[] string is copied, we charge the string length
3988 * and argv[]/envv[] pointer slot to ip_argspace, so that we can
3989 * fully preflight the arg list size.
3991 * Parameters: struct image_params * the image parameter block
3993 * Returns: 0 Success
3997 * (imgp->ip_argc) Count of arguments, updated
3998 * (imgp->ip_envc) Count of environment strings, updated
3999 * (imgp->ip_argspace) Count of remaining of NCARGS
4000 * (imgp->ip_interp_buffer) Interpreter and args (mutated in place)
4003 * Note: The argument and environment vectors are user space pointers
4004 * to arrays of user space pointers.
/*
 * NOTE(review): extraction-damaged region -- lines are split and many
 * statements (error checks, braces, '#!' tokenizer internals) are
 * missing.  Comments annotate the visible fragments only.
 */
/*
 * Copies argv[] and envv[] from user space into the image strings
 * work area, charging each string and pointer slot to ip_argspace so
 * the NCARGS limit can be enforced.  Handles the "#!" interpreter
 * case by injecting the interpreter's own arguments first.
 */
4007 exec_extract_strings(struct image_params
*imgp
)
/* ptr_size: pointer width of the CALLING process (for copyinptr). */
4010 int ptr_size
= (imgp
->ip_flags
& IMGPF_WAS_64BIT
) ? 8 : 4;
/* new_ptr_size: pointer width of the NEW image (for space accounting). */
4011 int new_ptr_size
= (imgp
->ip_flags
& IMGPF_IS_64BIT
) ? 8 : 4;
4012 user_addr_t argv
= imgp
->ip_user_argv
;
4013 user_addr_t envv
= imgp
->ip_user_envv
;
4016 * Adjust space reserved for the path name by however much padding it
4017 * needs. Doing this here since we didn't know if this would be a 32-
4018 * or 64-bit process back in exec_save_path.
4020 while (imgp
->ip_strspace
% new_ptr_size
!= 0) {
4021 *imgp
->ip_strendp
++ = '\0';
4022 imgp
->ip_strspace
--;
4023 /* imgp->ip_argspace--; not counted towards exec args total */
4027 * From now on, we start attributing string space to ip_argspace
4029 imgp
->ip_startargv
= imgp
->ip_strendp
;
/* Interpreter ("#!") images: extract the interpreter's own args first. */
4032 if((imgp
->ip_flags
& IMGPF_INTERPRET
) != 0) {
4034 char *argstart
, *ch
;
4036 /* First, the arguments in the "#!" string are tokenized and extracted. */
4037 argstart
= imgp
->ip_interp_buffer
;
/* Scan forward to the end of the current whitespace-delimited token. */
4040 while (*ch
&& !IS_WHITESPACE(*ch
)) {
4045 /* last argument, no need to NUL-terminate */
4046 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(argstart
), UIO_SYSSPACE
, TRUE
);
4051 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(argstart
), UIO_SYSSPACE
, TRUE
);
4054 * Find the next string. We know spaces at the end of the string have already
4058 while (IS_WHITESPACE(*argstart
)) {
4063 /* Error-check, regardless of whether this is the last interpreter arg or not */
4066 if (imgp
->ip_argspace
< new_ptr_size
) {
4070 imgp
->ip_argspace
-= new_ptr_size
; /* to hold argv[] entry */
4076 * If we are running an interpreter, replace the av[0] that was
4077 * passed to execve() with the path name that was
4078 * passed to execve() for interpreters which do not use the PATH
4079 * to locate their script arguments.
4081 error
= copyinptr(argv
, &arg
, ptr_size
);
4085 argv
+= ptr_size
; /* consume without using */
/*
 * For setuid scripts the script is referenced via /dev/fd/N so the
 * interpreter re-opens the same vnode the kernel validated.
 */
4089 if (imgp
->ip_interp_sugid_fd
!= -1) {
4090 char temp
[19]; /* "/dev/fd/" + 10 digits + NUL */
4091 snprintf(temp
, sizeof(temp
), "/dev/fd/%d", imgp
->ip_interp_sugid_fd
);
4092 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(temp
), UIO_SYSSPACE
, TRUE
);
/* Otherwise substitute the original execve() path for av[0]. */
4094 error
= exec_add_user_string(imgp
, imgp
->ip_user_fname
, imgp
->ip_seg
, TRUE
);
4099 if (imgp
->ip_argspace
< new_ptr_size
) {
4103 imgp
->ip_argspace
-= new_ptr_size
; /* to hold argv[] entry */
/* Main argv copy loop: a NULL user pointer terminates the vector. */
4107 while (argv
!= 0LL) {
4110 error
= copyinptr(argv
, &arg
, ptr_size
);
4123 error
= exec_add_user_string(imgp
, arg
, imgp
->ip_seg
, TRUE
);
4126 if (imgp
->ip_argspace
< new_ptr_size
) {
4130 imgp
->ip_argspace
-= new_ptr_size
; /* to hold argv[] entry */
4134 /* Save space for argv[] NULL terminator */
4135 if (imgp
->ip_argspace
< new_ptr_size
) {
4139 imgp
->ip_argspace
-= new_ptr_size
;
4141 /* Note where the args ends and env begins. */
4142 imgp
->ip_endargv
= imgp
->ip_strendp
;
4145 /* Now, get the environment */
4146 while (envv
!= 0LL) {
4149 error
= copyinptr(envv
, &env
, ptr_size
);
4160 error
= exec_add_user_string(imgp
, env
, imgp
->ip_seg
, TRUE
);
4163 if (imgp
->ip_argspace
< new_ptr_size
) {
4167 imgp
->ip_argspace
-= new_ptr_size
; /* to hold envv[] entry */
4171 /* Save space for envv[] NULL terminator */
4172 if (imgp
->ip_argspace
< new_ptr_size
) {
4176 imgp
->ip_argspace
-= new_ptr_size
;
4178 /* Align the tail of the combined argv+envv area */
4179 while (imgp
->ip_strspace
% new_ptr_size
!= 0) {
4180 if (imgp
->ip_argspace
< 1) {
4184 *imgp
->ip_strendp
++ = '\0';
4185 imgp
->ip_strspace
--;
4186 imgp
->ip_argspace
--;
4189 /* Note where the envv ends and applev begins. */
4190 imgp
->ip_endenvv
= imgp
->ip_strendp
;
4193 * From now on, we are no longer charging argument
4194 * space to ip_argspace.
/*
 * Keys and sizing constants for the "apple[]" string vector handed to
 * the new image (consumed by exec_add_apple_strings() below).  Each
 * *_KEY is the "name=" prefix of one apple[] entry; *_VALUES counts
 * how many 64-bit hex values follow the key.
 */
4202 * Libc has an 8-element array set up for stack guard values. It only fills
4203 * in one of those entries, and both gcc and llvm seem to use only a single
4204 * 8-byte guard. Until somebody needs more than an 8-byte guard value, don't
4205 * do the work to construct them.
4207 #define GUARD_VALUES 1
4208 #define GUARD_KEY "stack_guard="
4211 * System malloc needs some entropy when it is initialized.
4213 #define ENTROPY_VALUES 2
4214 #define ENTROPY_KEY "malloc_entropy="
4217 * System malloc engages nanozone for UIAPP.
4219 #define NANO_ENGAGE_KEY "MallocNanoZone=1"
4221 #define PFZ_KEY "pfz="
/* User-space addresses of the 32-/64-bit commpage text (pfz= value). */
4222 extern user32_addr_t commpage_text32_location
;
4223 extern user64_addr_t commpage_text64_location
;
4225 #define MAIN_STACK_VALUES 4
4226 #define MAIN_STACK_KEY "main_stack="
4228 #define FSID_KEY "executable_file="
4229 #define DYLD_FSID_KEY "dyld_file="
4230 #define CDHASH_KEY "executable_cdhash="
/* Worst-case "fsid,fsobjid" text, used only to size stack buffers. */
4232 #define FSID_MAX_STRING "0x1234567890abcdef,0x1234567890abcdef"
4234 #define HEX_STR_LEN 18 // 64-bit hex value "0x0123456701234567"
/*
 * NOTE(review): extraction-damaged fragment; some parameter and loop
 * bookkeeping lines are missing from this view.
 */
/*
 * Appends one apple[] entry of the form "<key>0x<v0>,0x<v1>,..." built
 * from freshly read random values; the value count is clamped to 8.
 */
4237 exec_add_entropy_key(struct image_params
*imgp
,
4242 const int limit
= 8;
4243 uint64_t entropy
[limit
];
/* VLA sized for the worst case: key + 'limit' comma-separated hex values. */
4244 char str
[strlen(key
) + (HEX_STR_LEN
+ 1) * limit
+ 1];
4245 if (values
> limit
) {
4249 read_random(entropy
, sizeof(entropy
[0]) * values
);
/*
 * Zero byte 1 of the first value -- presumably so the stack guard
 * contains an embedded NUL (defeats C-string overreads); TODO confirm
 * against the caller's comment for GUARD_KEY.
 */
4252 entropy
[0] &= ~(0xffull
<< 8);
4255 int len
= snprintf(str
, sizeof(str
), "%s0x%llx", key
, entropy
[0]);
4256 int remaining
= sizeof(str
) - len
;
/* Append ",0x<value>" for each remaining value while space permits. */
4257 for (int i
= 1; i
< values
&& remaining
> 0; ++i
) {
4258 int start
= sizeof(str
) - remaining
;
4259 len
= snprintf(&str
[start
], remaining
, ",0x%llx", entropy
[i
]);
4263 return exec_add_user_string(imgp
, CAST_USER_ADDR_T(str
), UIO_SYSSPACE
, FALSE
);
4267 * Build up the contents of the apple[] string vector
/*
 * NOTE(review): extraction-damaged region -- lines split, error-check
 * statements and several braces missing.  Comments annotate fragments.
 */
/*
 * Builds the apple[] string vector for the new image: pfz address,
 * optional MallocNanoZone flag, stack-guard and malloc entropy,
 * main-thread stack geometry, executable/dyld file IDs, and the
 * executable's code-signing cdhash.  Increments ip_applec per entry.
 */
4270 exec_add_apple_strings(struct image_params
*imgp
,
4271 const load_result_t
*load_result
)
4274 int img_ptr_size
= (imgp
->ip_flags
& IMGPF_IS_64BIT
) ? 8 : 4;
4276 /* exec_save_path stored the first string */
4277 imgp
->ip_applec
= 1;
4279 /* adding the pfz string */
4281 char pfz_string
[strlen(PFZ_KEY
) + HEX_STR_LEN
+ 1];
4283 if (img_ptr_size
== 8) {
4284 snprintf(pfz_string
, sizeof(pfz_string
), PFZ_KEY
"0x%llx", commpage_text64_location
);
4286 snprintf(pfz_string
, sizeof(pfz_string
), PFZ_KEY
"0x%x", commpage_text32_location
);
4288 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(pfz_string
), UIO_SYSSPACE
, FALSE
);
4295 /* adding the NANO_ENGAGE_KEY key */
4296 if (imgp
->ip_px_sa
) {
/* Only posix_spawn() callers can have spawn attributes here. */
4297 int proc_flags
= (((struct _posix_spawnattr
*) imgp
->ip_px_sa
)->psa_flags
);
4299 if ((proc_flags
& _POSIX_SPAWN_NANO_ALLOCATOR
) == _POSIX_SPAWN_NANO_ALLOCATOR
) {
4300 const char *nano_string
= NANO_ENGAGE_KEY
;
4301 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(nano_string
), UIO_SYSSPACE
, FALSE
);
4310 * Supply libc with a collection of random values to use when
4311 * implementing -fstack-protector.
4313 * (The first random string always contains an embedded NUL so that
4314 * __stack_chk_guard also protects against C string vulnerabilities)
4316 error
= exec_add_entropy_key(imgp
, GUARD_KEY
, GUARD_VALUES
, TRUE
);
4323 * Supply libc with entropy for system malloc.
4325 error
= exec_add_entropy_key(imgp
, ENTROPY_KEY
, ENTROPY_VALUES
, FALSE
);
4332 * Add MAIN_STACK_KEY: Supplies the address and size of the main thread's
4333 * stack if it was allocated by the kernel.
4335 * The guard page is not included in this stack size as libpthread
4336 * expects to add it back in after receiving this value.
4338 if (load_result
->unixproc
) {
4339 char stack_string
[strlen(MAIN_STACK_KEY
) + (HEX_STR_LEN
+ 1) * MAIN_STACK_VALUES
+ 1];
4340 snprintf(stack_string
, sizeof(stack_string
),
4341 MAIN_STACK_KEY
"0x%llx,0x%llx,0x%llx,0x%llx",
4342 (uint64_t)load_result
->user_stack
,
4343 (uint64_t)load_result
->user_stack_size
,
4344 (uint64_t)load_result
->user_stack_alloc
,
4345 (uint64_t)load_result
->user_stack_alloc_size
);
4346 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(stack_string
), UIO_SYSSPACE
, FALSE
);
/* executable_file=<fsid>,<fileid> identifying the exec'd vnode. */
4353 if (imgp
->ip_vattr
) {
4354 uint64_t fsid
= get_va_fsid(imgp
->ip_vattr
);
4355 uint64_t fsobjid
= imgp
->ip_vattr
->va_fileid
;
4357 char fsid_string
[strlen(FSID_KEY
) + strlen(FSID_MAX_STRING
) + 1];
4358 snprintf(fsid_string
, sizeof(fsid_string
),
4359 FSID_KEY
"0x%llx,0x%llx", fsid
, fsobjid
);
4360 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(fsid_string
), UIO_SYSSPACE
, FALSE
);
/* dyld_file=<fsid>,<fileid> when a dynamic linker was recorded. */
4367 if (imgp
->ip_dyld_fsid
|| imgp
->ip_dyld_fsobjid
) {
4368 char fsid_string
[strlen(DYLD_FSID_KEY
) + strlen(FSID_MAX_STRING
) + 1];
4369 snprintf(fsid_string
, sizeof(fsid_string
),
4370 DYLD_FSID_KEY
"0x%llx,0x%llx", imgp
->ip_dyld_fsid
, imgp
->ip_dyld_fsobjid
);
4371 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(fsid_string
), UIO_SYSSPACE
, FALSE
);
4378 uint8_t cdhash
[SHA1_RESULTLEN
];
/* NOTE(review): 'cdhash_errror' is misspelled (local only; harmless). */
4379 int cdhash_errror
= ubc_cs_getcdhash(imgp
->ip_vp
, imgp
->ip_arch_offset
, cdhash
);
4380 if (cdhash_errror
== 0) {
4381 char hash_string
[strlen(CDHASH_KEY
) + 2*SHA1_RESULTLEN
+ 1];
/*
 * NOTE(review): strncpy() relies on CDHASH_KEY fitting in hash_string
 * (true by construction here); the hex loop below re-terminates.
 */
4382 strncpy(hash_string
, CDHASH_KEY
, sizeof(hash_string
));
4383 char *p
= hash_string
+ sizeof(CDHASH_KEY
) - 1;
/* Append two lowercase hex digits per hash byte; snprintf NUL-terminates. */
4384 for (int i
= 0; i
< SHA1_RESULTLEN
; i
++) {
4385 snprintf(p
, 3, "%02x", (int) cdhash
[i
]);
4388 error
= exec_add_user_string(imgp
, CAST_USER_ADDR_T(hash_string
), UIO_SYSSPACE
, FALSE
);
4395 /* Align the tail of the combined applev area */
4396 while (imgp
->ip_strspace
% img_ptr_size
!= 0) {
4397 *imgp
->ip_strendp
++ = '\0';
4398 imgp
->ip_strspace
--;
4405 #define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur)
4408 * exec_check_permissions
4410 * Description: Verify that the file that is being attempted to be executed
4411 * is in fact allowed to be executed based on it POSIX file
4412 * permissions and other access control criteria
4414 * Parameters: struct image_params * the image parameter block
4416 * Returns: 0 Success
4417 * EACCES Permission denied
4418 * ENOEXEC Executable file format error
4419 * ETXTBSY Text file busy [misuse of error code]
4421 * vnode_authorize:???
/*
 * NOTE(review): extraction-damaged region -- lines split, 'return'
 * statements and some braces missing.  Comments annotate fragments.
 */
/*
 * Validates that ip_vp may be executed: regular file, at least one
 * execute bit, non-zero size, MACF approval, and kauth authorization;
 * also strips setuid/setgid effects for nosuid mounts or traced procs.
 * Side effects: fills *vap via vnode_getattr() and initializes
 * ip_arch_offset/ip_arch_size.
 */
4424 exec_check_permissions(struct image_params
*imgp
)
4426 struct vnode
*vp
= imgp
->ip_vp
;
4427 struct vnode_attr
*vap
= imgp
->ip_vattr
;
4428 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
4430 kauth_action_t action
;
4432 /* Only allow execution of regular files */
4433 if (!vnode_isreg(vp
))
4436 /* Get the file attributes that we will be using here and elsewhere */
4438 VATTR_WANTED(vap
, va_uid
);
4439 VATTR_WANTED(vap
, va_gid
);
4440 VATTR_WANTED(vap
, va_mode
);
4441 VATTR_WANTED(vap
, va_fsid
);
4442 VATTR_WANTED(vap
, va_fsid64
);
4443 VATTR_WANTED(vap
, va_fileid
);
4444 VATTR_WANTED(vap
, va_data_size
);
4445 if ((error
= vnode_getattr(vp
, vap
, imgp
->ip_vfs_context
)) != 0)
4449 * Ensure that at least one execute bit is on - otherwise root
4450 * will always succeed, and we don't want that to happen unless the
4451 * file really is executable.
4453 if (!vfs_authopaque(vnode_mount(vp
)) && ((vap
->va_mode
& (S_IXUSR
| S_IXGRP
| S_IXOTH
)) == 0))
4456 /* Disallow zero length files */
4457 if (vap
->va_data_size
== 0)
/* Default to the whole file; fat/arch handling adjusts these later. */
4460 imgp
->ip_arch_offset
= (user_size_t
)0;
4461 imgp
->ip_arch_size
= vap
->va_data_size
;
4463 /* Disable setuid-ness for traced programs or if MNT_NOSUID */
4464 if ((vp
->v_mount
->mnt_flag
& MNT_NOSUID
) || (p
->p_lflag
& P_LTRACED
))
4465 vap
->va_mode
&= ~(VSUID
| VSGID
);
4468 * Disable _POSIX_SPAWN_ALLOW_DATA_EXEC and _POSIX_SPAWN_DISABLE_ASLR
4469 * flags for setuid/setgid binaries.
4471 if (vap
->va_mode
& (VSUID
| VSGID
))
4472 imgp
->ip_flags
&= ~(IMGPF_ALLOW_DATA_EXEC
| IMGPF_DISABLE_ASLR
);
4475 error
= mac_vnode_check_exec(imgp
->ip_vfs_context
, vp
, imgp
);
4480 /* Check for execute permission */
4481 action
= KAUTH_VNODE_EXECUTE
;
4482 /* Traced images must also be readable */
4483 if (p
->p_lflag
& P_LTRACED
)
4484 action
|= KAUTH_VNODE_READ_DATA
;
4485 if ((error
= vnode_authorize(vp
, NULL
, action
, imgp
->ip_vfs_context
)) != 0)
4489 /* Don't let it run if anyone had it open for writing */
4491 if (vp
->v_writecount
) {
/*
 * NOTE(review): "%x" with a pointer argument is a format mismatch
 * on LP64; "%p" would be correct.  Presumably debug-only code
 * (guarded by a conditional not visible here) -- confirm.
 */
4492 panic("going to return ETXTBSY %x", vp
);
4500 /* XXX May want to indicate to underlying FS that vnode is open */
4509 * Initially clear the P_SUGID in the process flags; if an SUGID process is
4510 * exec'ing a non-SUGID image, then this is the point of no return.
4512 * If the image being activated is SUGID, then replace the credential with a
4513 * copy, disable tracing (unless the tracing process is root), reset the
4514 * mach task port to revoke it, set the P_SUGID bit,
4516 * If the saved user and group ID will be changing, then make sure it happens
4517 * to a new credential, rather than a shared one.
4519 * Set the security token (this is probably obsolete, given that the token
4520 * should not technically be separate from the credential itself).
4522 * Parameters: struct image_params * the image parameter block
4524 * Returns: void No failure indication
4527 * <process credential> Potentially modified/replaced
4528 * <task port> Potentially revoked
4529 * <process flags> P_SUGID bit potentially modified
4530 * <security token> Potentially modified
/*
 * NOTE(review): extraction-damaged region -- lines split and many
 * statements (#if guards, braces, 'break'/'continue', error paths)
 * missing.  Comments annotate the visible fragments only; this
 * credential logic is order-sensitive, so the code is left untouched.
 */
/*
 * Applies setuid/setgid and MAC-label credential transitions for the
 * image described by imgp: swaps in new euid/egid credentials under
 * the proc ucred lock, resets task/thread IPC ports for privilege
 * transitions, ensures fds 0-2 exist for setuid images, makes the
 * effective ids the saved ids, and updates the security token.
 */
4533 exec_handle_sugid(struct image_params
*imgp
)
4535 proc_t p
= vfs_context_proc(imgp
->ip_vfs_context
);
4536 kauth_cred_t cred
= vfs_context_ucred(imgp
->ip_vfs_context
);
4537 kauth_cred_t my_cred
, my_new_cred
;
4539 int leave_sugid_clear
= 0;
4540 int mac_reset_ipc
= 0;
4544 int mac_transition
, disjoint_cred
= 0;
4545 int label_update_return
= 0;
4548 * Determine whether a call to update the MAC label will result in the
4549 * credential changing.
4551 * Note: MAC policies which do not actually end up modifying
4552 * the label subsequently are strongly encouraged to
4553 * return 0 for this check, since a non-zero answer will
4554 * slow down the exec fast path for normal binaries.
4556 mac_transition
= mac_cred_check_label_update_execve(
4557 imgp
->ip_vfs_context
,
4559 imgp
->ip_arch_offset
,
4561 imgp
->ip_scriptlabelp
,
4562 imgp
->ip_execlabelp
,
/* Start from a clean slate: atomically clear P_SUGID on the proc. */
4567 OSBitAndAtomic(~((uint32_t)P_SUGID
), &p
->p_flag
);
4570 * Order of the following is important; group checks must go last,
4571 * as we use the success of the 'ismember' check combined with the
4572 * failure of the explicit match to indicate that we will be setting
4573 * the egid of the process even though the new process did not
4574 * require VSUID/VSGID bits in order for it to set the new group as
4577 * Note: Technically, by this we are implying a call to
4578 * setegid() in the new process, rather than implying
4579 * it used its VSGID bit to set the effective group,
4580 * even though there is no code in that process to make
/*
 * Enter the transition path if the image is VSUID with a different
 * owner uid, or VSGID with a group the cred doesn't already match.
 * kauth_cred_ismember_gid() also sets leave_sugid_clear as a side
 * effect, which the logic below relies on.
 */
4583 if (((imgp
->ip_origvattr
->va_mode
& VSUID
) != 0 &&
4584 kauth_cred_getuid(cred
) != imgp
->ip_origvattr
->va_uid
) ||
4585 ((imgp
->ip_origvattr
->va_mode
& VSGID
) != 0 &&
4586 ((kauth_cred_ismember_gid(cred
, imgp
->ip_origvattr
->va_gid
, &leave_sugid_clear
) || !leave_sugid_clear
) ||
4587 (kauth_cred_getgid(cred
) != imgp
->ip_origvattr
->va_gid
)))) {
4590 /* label for MAC transition and neither VSUID nor VSGID */
4591 handle_mac_transition
:
4596 * Replace the credential with a copy of itself if euid or
4599 * Note: setuid binaries will automatically opt out of
4600 * group resolver participation as a side effect
4601 * of this operation. This is an intentional
4602 * part of the security model, which requires a
4603 * participating credential be established by
4604 * escalating privilege, setting up all other
4605 * aspects of the credential including whether
4606 * or not to participate in external group
4607 * membership resolution, then dropping their
4608 * effective privilege to that of the desired
4609 * final credential state.
4611 * Modifications to p_ucred must be guarded using the
4612 * proc's ucred lock. This prevents others from accessing
4613 * a garbage credential.
/*
 * Retry loop: build a new cred with euid/svuid = file owner, then
 * install it only if p_ucred is still the cred we copied from;
 * otherwise another thread changed it and we loop to retry.
 */
4615 while (imgp
->ip_origvattr
->va_mode
& VSUID
) {
4616 my_cred
= kauth_cred_proc_ref(p
);
4617 my_new_cred
= kauth_cred_setresuid(my_cred
, KAUTH_UID_NONE
, imgp
->ip_origvattr
->va_uid
, imgp
->ip_origvattr
->va_uid
, KAUTH_UID_NONE
);
4619 if (my_new_cred
== my_cred
) {
4620 kauth_cred_unref(&my_cred
);
4624 /* update cred on proc */
4627 if (p
->p_ucred
!= my_cred
) {
4628 proc_ucred_unlock(p
);
4629 kauth_cred_unref(&my_new_cred
);
4633 /* donate cred reference on my_new_cred to p->p_ucred */
4634 p
->p_ucred
= my_new_cred
;
4635 PROC_UPDATE_CREDS_ONPROC(p
);
4636 proc_ucred_unlock(p
);
4638 /* drop additional reference that was taken on the previous cred */
4639 kauth_cred_unref(&my_cred
);
/* Same compare-and-swap pattern for the VSGID (setgid) case. */
4644 while (imgp
->ip_origvattr
->va_mode
& VSGID
) {
4645 my_cred
= kauth_cred_proc_ref(p
);
4646 my_new_cred
= kauth_cred_setresgid(my_cred
, KAUTH_GID_NONE
, imgp
->ip_origvattr
->va_gid
, imgp
->ip_origvattr
->va_gid
);
4648 if (my_new_cred
== my_cred
) {
4649 kauth_cred_unref(&my_cred
);
4653 /* update cred on proc */
4656 if (p
->p_ucred
!= my_cred
) {
4657 proc_ucred_unlock(p
);
4658 kauth_cred_unref(&my_new_cred
);
4662 /* donate cred reference on my_new_cred to p->p_ucred */
4663 p
->p_ucred
= my_new_cred
;
4664 PROC_UPDATE_CREDS_ONPROC(p
);
4665 proc_ucred_unlock(p
);
4667 /* drop additional reference that was taken on the previous cred */
4668 kauth_cred_unref(&my_cred
);
4672 #endif /* !SECURE_KERNEL */
4676 * If a policy has indicated that it will transition the label,
4677 * before making the call into the MAC policies, get a new
4678 * duplicate credential, so they can modify it without
4679 * modifying any others sharing it.
4681 if (mac_transition
) {
4683 * This hook may generate upcalls that require
4684 * importance donation from the kernel.
4687 thread_t thread
= current_thread();
4688 thread_enable_send_importance(thread
, TRUE
);
4689 kauth_proc_label_update_execve(p
,
4690 imgp
->ip_vfs_context
,
4692 imgp
->ip_arch_offset
,
4694 imgp
->ip_scriptlabelp
,
4695 imgp
->ip_execlabelp
,
4698 &disjoint_cred
, /* will be non zero if disjoint */
4699 &label_update_return
);
4700 thread_enable_send_importance(thread
, FALSE
);
4702 if (disjoint_cred
) {
4704 * If updating the MAC label resulted in a
4705 * disjoint credential, flag that we need to
4706 * set the P_SUGID bit. This protects
4707 * against debuggers being attached by an
4708 * insufficiently privileged process onto the
4709 * result of a transition to a more privileged
4712 leave_sugid_clear
= 0;
4715 imgp
->ip_mac_return
= label_update_return
;
4718 mac_reset_ipc
= mac_proc_check_inherit_ipc_ports(p
, p
->p_textvp
, p
->p_textoff
, imgp
->ip_vp
, imgp
->ip_arch_offset
, imgp
->ip_scriptvp
);
4720 #endif /* CONFIG_MACF */
4723 * If 'leave_sugid_clear' is non-zero, then we passed the
4724 * VSUID and MACF checks, and successfully determined that
4725 * the previous cred was a member of the VSGID group, but
4726 * that it was not the default at the time of the execve,
4727 * and that the post-labelling credential was not disjoint.
4728 * So we don't set the P_SUGID or reset mach ports and fds
4729 * on the basis of simply running this code.
4731 if (mac_reset_ipc
|| !leave_sugid_clear
) {
4733 * Have mach reset the task and thread ports.
4734 * We don't want anyone who had the ports before
4735 * a setuid exec to be able to access/control the
4736 * task/thread after.
/* Prefer the exec-copy task/thread when one was created for this exec. */
4738 ipc_task_reset((imgp
->ip_new_thread
!= NULL
) ?
4739 get_threadtask(imgp
->ip_new_thread
) : p
->task
);
4740 ipc_thread_reset((imgp
->ip_new_thread
!= NULL
) ?
4741 imgp
->ip_new_thread
: current_thread());
4744 if (!leave_sugid_clear
) {
4746 * Flag the process as setuid.
4748 OSBitOrAtomic(P_SUGID
, &p
->p_flag
);
4751 * Radar 2261856; setuid security hole fix
4752 * XXX For setuid processes, attempt to ensure that
4753 * stdin, stdout, and stderr are already allocated.
4754 * We do not want userland to accidentally allocate
4755 * descriptors in this range which has implied meaning
4758 for (i
= 0; i
< 3; i
++) {
/* Slot already occupied: nothing to do for this fd. */
4760 if (p
->p_fd
->fd_ofiles
[i
] != NULL
)
4764 * Do the kernel equivalent of
4767 * (void) open("/dev/null", O_RDONLY);
4769 * (void) open("/dev/null", O_WRONLY);
4772 struct fileproc
*fp
;
4775 struct nameidata
*ndp
= NULL
;
4782 if ((error
= falloc(p
,
4783 &fp
, &indx
, imgp
->ip_vfs_context
)) != 0)
4786 MALLOC(ndp
, struct nameidata
*, sizeof(*ndp
), M_TEMP
, M_WAITOK
| M_ZERO
);
/* Allocation-failure path: release the fd reserved above. */
4788 fp_free(p
, indx
, fp
);
4793 NDINIT(ndp
, LOOKUP
, OP_OPEN
, FOLLOW
, UIO_SYSSPACE
,
4794 CAST_USER_ADDR_T("/dev/null"),
4795 imgp
->ip_vfs_context
);
4797 if ((error
= vn_open(ndp
, flag
, 0)) != 0) {
4798 fp_free(p
, indx
, fp
);
/* Wire the opened /dev/null vnode into the new file's fileglob. */
4803 struct fileglob
*fg
= fp
->f_fglob
;
4806 fg
->fg_ops
= &vnops
;
4807 fg
->fg_data
= ndp
->ni_vp
;
4809 vnode_put(ndp
->ni_vp
);
4812 procfdtbl_releasefd(p
, indx
, NULL
);
4813 fp_drop(p
, indx
, fp
, 1);
4823 * We are here because we were told that the MAC label will
4824 * be transitioned, and the binary is not VSUID or VSGID; to
4825 * deal with this case, we could either duplicate a lot of
4826 * code, or we can indicate we want to default the P_SUGID
4827 * bit clear and jump back up.
4829 if (mac_transition
) {
4830 leave_sugid_clear
= 1;
4831 goto handle_mac_transition
;
4835 #endif /* CONFIG_MACF */
4838 * Implement the semantic where the effective user and group become
4839 * the saved user and group in exec'ed programs.
4841 * Modifications to p_ucred must be guarded using the
4842 * proc's ucred lock. This prevents others from accessing
4843 * a garbage credential.
/* Same retry/compare-and-swap installation pattern as above. */
4846 my_cred
= kauth_cred_proc_ref(p
);
4847 my_new_cred
= kauth_cred_setsvuidgid(my_cred
, kauth_cred_getuid(my_cred
), kauth_cred_getgid(my_cred
));
4849 if (my_new_cred
== my_cred
) {
4850 kauth_cred_unref(&my_cred
);
4854 /* update cred on proc */
4857 if (p
->p_ucred
!= my_cred
) {
4858 proc_ucred_unlock(p
);
4859 kauth_cred_unref(&my_new_cred
);
4863 /* donate cred reference on my_new_cred to p->p_ucred */
4864 p
->p_ucred
= my_new_cred
;
4865 PROC_UPDATE_CREDS_ONPROC(p
);
4866 proc_ucred_unlock(p
);
4868 /* drop additional reference that was taken on the previous cred */
4869 kauth_cred_unref(&my_cred
);
4875 /* Update the process' identity version and set the security token */
4878 if (imgp
->ip_new_thread
!= NULL
) {
4879 task
= get_threadtask(imgp
->ip_new_thread
);
4883 set_security_token_task_internal(p
, task
);
4892 * Description: Set the user stack address for the process to the provided
4893 * address. If a custom stack was not set as a result of the
4894 * load process (i.e. as specified by the image file for the
4895 * executable), then allocate the stack in the provided map and
4896 * set up appropriate guard pages for enforcing administrative
4897 * limits on stack growth, if they end up being needed.
4899 * Parameters: p Process to set stack on
4900 * load_result Information from mach-o load commands
4901 * map Address map in which to allocate the new stack
4903 * Returns: KERN_SUCCESS Stack successfully created
4904 * !KERN_SUCCESS Mach failure code
/*
 * create_unix_stack
 *
 * Purpose: record the user-mode stack top on the process and, when the
 * image requested a stack allocation (user_stack_alloc_size > 0), allocate
 * the stack region in the target map and protect the range above the
 * current stack-size limit so it acts as an inaccessible guard area.
 *
 * Returns: KERN_SUCCESS on success, KERN_INVALID_ARGUMENT on size
 * overflow, or a Mach failure code (allocation/protection failure paths).
 *
 * NOTE(review): this region of the file is a lossy extraction -- source
 * lines are missing between the embedded original line numbers (e.g. the
 * trailing parameters of the signature, closing braces, and the
 * mach_vm_protect() argument list). Comments below describe only what the
 * visible fragments show; confirm details against the full source.
 */
4906 static kern_return_t
4907 create_unix_stack(vm_map_t map
, load_result_t
* load_result
,
4910 mach_vm_size_t size
, prot_size
;
4911 mach_vm_offset_t addr
, prot_addr
;
/* Stack top as specified by the loaded image (may be replaced below). */
4914 mach_vm_address_t user_stack
= load_result
->user_stack
;
/* Publish the stack top on the proc ('p' is presumably a parameter on an elided signature line -- confirm). */
4917 p
->user_stack
= user_stack
;
/* Only allocate when the loader asked for a stack of a given size. */
4920 if (load_result
->user_stack_alloc_size
> 0) {
4922 * Allocate enough space for the maximum stack size we
4923 * will ever authorize and an extra page to act as
4924 * a guard page for stack overflows. For default stacks,
4925 * vm_initial_limit_stack takes care of the extra guard page.
4926 * Otherwise we must allocate it ourselves.
/* Round the requested size to page granularity; bail out on overflow. */
4928 if (mach_vm_round_page_overflow(load_result
->user_stack_alloc_size
, &size
)) {
4929 return KERN_INVALID_ARGUMENT
;
/* Place the allocation so that it ends at the requested stack top. */
4931 addr
= mach_vm_trunc_page(load_result
->user_stack
- size
);
4932 kr
= mach_vm_allocate_kernel(map
, &addr
, size
,
4933 VM_FLAGS_FIXED
, VM_MEMORY_STACK
);
/* Fixed placement failed -- retry anywhere in the map. */
4934 if (kr
!= KERN_SUCCESS
) {
4935 // Can't allocate at default location, try anywhere
4937 kr
= mach_vm_allocate_kernel(map
, &addr
, size
,
4938 VM_FLAGS_ANYWHERE
, VM_MEMORY_STACK
);
4939 if (kr
!= KERN_SUCCESS
) {
/* Stack grows down: its top is the end of the allocated region. */
4943 user_stack
= addr
+ size
;
4944 load_result
->user_stack
= user_stack
;
4947 p
->user_stack
= user_stack
;
/* Remember the allocation base so later code can find/deallocate it. */
4951 load_result
->user_stack_alloc
= addr
;
4954 * And prevent access to what's above the current stack
4955 * size limit for this process.
/* If the image did not specify a stack size, use the process rlimit.
 * prot_size is the byte count above the limit to render inaccessible;
 * assumes user_stack_size <= size here -- TODO confirm in full source. */
4957 if (load_result
->user_stack_size
== 0) {
4958 load_result
->user_stack_size
= unix_stack_size(p
);
4959 prot_size
= mach_vm_trunc_page(size
- load_result
->user_stack_size
);
4961 prot_size
= PAGE_SIZE
;
/* Protect the guard range (argument list elided in this extraction). */
4965 kr
= mach_vm_protect(map
,
/* On protect failure, undo the stack allocation before returning. */
4970 if (kr
!= KERN_SUCCESS
) {
4971 (void)mach_vm_deallocate(map
, addr
, size
);
4976 return KERN_SUCCESS
;
4979 #include <sys/reboot.h>
4982 * load_init_program_at_path
4984 * Description: Load the "init" program; in most cases, this will be "launchd"
4986 * Parameters: p Process to call execve() to create
4987 * the "init" program
4988 * scratch_addr Page in p, scratch space
4989 * path NULL terminated path
4991 * Returns: KERN_SUCCESS Success
4992 * !KERN_SUCCESS See execve/mac_execve for error codes
4994 * Notes: The process that is passed in is the first manufactured
4995 * process on the system, and gets here via bsd_ast() firing
4996 * for the first time. This is done to ensure that bsd_init()
4997 * has run to completion.
4999 * The address map of the first manufactured process matches the
5000 * word width of the kernel. Once the self-exec completes, the
5001 * initproc might be different.
/*
 * load_init_program_at_path
 *
 * Purpose: attempt to exec the "init" program (normally launchd) at the
 * given path: copy the path string (argv[0]) and, in single-user boot,
 * a "-s" argument (argv[1]) into the process's scratch page, build the
 * argv pointer array in the width of the target process, then call
 * execve() with a fabricated argument block.
 *
 * NOTE(review): lossy extraction -- error-check lines after each copyout
 * and the return type / opening brace are elided; 'error' and 'retval'
 * are presumably declared on elided lines. Confirm against full source.
 */
5004 load_init_program_at_path(proc_t p
, user_addr_t scratch_addr
, const char* path
)
5008 struct execve_args init_exec_args
;
5009 user_addr_t argv0
= USER_ADDR_NULL
, argv1
= USER_ADDR_NULL
;
5012 * Validate inputs and pre-conditions
5015 assert(scratch_addr
);
5019 * Copy out program name.
/* argv[0] is the program path itself, copied into the scratch page. */
5021 size_t path_length
= strlen(path
) + 1;
5022 argv0
= scratch_addr
;
5023 error
= copyout(path
, argv0
, path_length
);
/* Advance past the string, keeping user_addr_t alignment for what follows. */
5027 scratch_addr
= USER_ADDR_ALIGN(scratch_addr
+ path_length
, sizeof(user_addr_t
));
5030 * Put out first (and only) argument, similarly.
5031 * Assumes everything fits in a page as allocated above.
/* Single-user boot: pass "-s" so init comes up in single-user mode. */
5033 if (boothowto
& RB_SINGLE
) {
5034 const char *init_args
= "-s";
5035 size_t init_args_length
= strlen(init_args
)+1;
5037 argv1
= scratch_addr
;
5038 error
= copyout(init_args
, argv1
, init_args_length
);
5042 scratch_addr
= USER_ADDR_ALIGN(scratch_addr
+ init_args_length
, sizeof(user_addr_t
));
/* Write the NULL-terminated argv pointer array in the pointer width of
 * the target process (64-bit vs 32-bit user addresses). */
5045 if (proc_is64bit(p
)) {
5046 user64_addr_t argv64bit
[3];
5048 argv64bit
[0] = argv0
;
5049 argv64bit
[1] = argv1
;
5050 argv64bit
[2] = USER_ADDR_NULL
;
5052 error
= copyout(argv64bit
, scratch_addr
, sizeof(argv64bit
));
5056 user32_addr_t argv32bit
[3];
5058 argv32bit
[0] = (user32_addr_t
)argv0
;
5059 argv32bit
[1] = (user32_addr_t
)argv1
;
5060 argv32bit
[2] = USER_ADDR_NULL
;
5062 error
= copyout(argv32bit
, scratch_addr
, sizeof(argv32bit
));
5068 * Set up argument block for fake call to execve.
/* fname/argp point into the user scratch page; no environment is passed. */
5070 init_exec_args
.fname
= argv0
;
5071 init_exec_args
.argp
= scratch_addr
;
5072 init_exec_args
.envp
= USER_ADDR_NULL
;
5075 * So that init task is set with uid,gid 0 token
5077 set_security_token(p
);
/* Hand off to execve(); 'retval' presumably declared on an elided line. */
5079 return execve(p
, &init_exec_args
, retval
);
/*
 * Candidate init (launchd) executables, tried in order by
 * load_init_program(). NOTE(review): the array is truncated in this
 * extraction -- the closing brace and at least the release /sbin/launchd
 * entry are elided; conditional entries are gated on build type.
 */
5082 static const char * init_programs
[] = {
5084 "/usr/local/sbin/launchd.debug",
5086 #if DEVELOPMENT || DEBUG
5087 "/usr/local/sbin/launchd.development",
5095 * Description: Load the "init" program; in most cases, this will be "launchd"
5097 * Parameters: p Process to call execve() to create
5098 * the "init" program
5102 * Notes: The process that is passed in is the first manufactured
5103 * process on the system, and gets here via bsd_ast() firing
5104 * for the first time. This is done to ensure that bsd_init()
5105 * has run to completion.
5107 * In DEBUG & DEVELOPMENT builds, the launchdsuffix boot-arg
5108 * may be used to select a specific launchd executable. As with
5109 * the kcsuffix boot-arg, setting launchdsuffix to "" or "release"
5110 * will force /sbin/launchd to be selected.
5112 * Search order by build:
5114 * DEBUG DEVELOPMENT RELEASE PATH
5115 * ----------------------------------------------------------------------------------
5116 * 1 1 NA /usr/local/sbin/launchd.$LAUNCHDSUFFIX
5117 * 2 NA NA /usr/local/sbin/launchd.debug
5118 * 3 2 NA /usr/local/sbin/launchd.development
5119 * 4 3 1 /sbin/launchd
/*
 * load_init_program
 *
 * Purpose: bootstrap process 1. Allocates a scratch page in the caller's
 * address map for argv construction, honors the "launchdsuffix" boot-arg
 * on DEBUG/DEVELOPMENT builds to pick a specific launchd binary, and
 * otherwise walks the init_programs[] candidate list. Panics if no
 * candidate can be exec'ed -- the system cannot come up without init.
 *
 * NOTE(review): lossy extraction -- 'error'/'i' declarations, success
 * checks after each load attempt, and closing braces are elided.
 */
5122 load_init_program(proc_t p
)
/* One page of user-visible scratch space for argv strings and pointers. */
5126 vm_map_t map
= current_map();
5127 mach_vm_offset_t scratch_addr
= 0;
5128 mach_vm_size_t map_page_size
= vm_map_page_size(map
);
5130 (void) mach_vm_allocate_kernel(map
, &scratch_addr
, map_page_size
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_NONE
);
5131 #if CONFIG_MEMORYSTATUS
5132 (void) memorystatus_init_at_boot_snapshot();
5133 #endif /* CONFIG_MEMORYSTATUS */
5135 #if DEBUG || DEVELOPMENT
5136 /* Check for boot-arg suffix first */
5137 char launchd_suffix
[64];
5138 if (PE_parse_boot_argn("launchdsuffix", launchd_suffix
, sizeof(launchd_suffix
))) {
5139 char launchd_path
[128];
/* An empty suffix or "release" forces the stock /sbin/launchd. */
5140 boolean_t is_release_suffix
= ((launchd_suffix
[0] == 0) ||
5141 (strcmp(launchd_suffix
, "release") == 0));
5143 if (is_release_suffix
) {
5144 printf("load_init_program: attempting to load /sbin/launchd\n");
5145 error
= load_init_program_at_path(p
, (user_addr_t
)scratch_addr
, "/sbin/launchd");
5149 panic("Process 1 exec of launchd.release failed, errno %d", error
);
/* Otherwise build "/usr/local/sbin/launchd.<suffix>" and try that. */
5151 strlcpy(launchd_path
, "/usr/local/sbin/launchd.", sizeof(launchd_path
));
5152 strlcat(launchd_path
, launchd_suffix
, sizeof(launchd_path
));
5154 printf("load_init_program: attempting to load %s\n", launchd_path
);
5155 error
= load_init_program_at_path(p
, (user_addr_t
)scratch_addr
, launchd_path
);
5159 printf("load_init_program: failed loading %s: errno %d\n", launchd_path
, error
);
/* Default path: try each candidate in init_programs[] until one execs. */
5166 for (i
= 0; i
< sizeof(init_programs
)/sizeof(init_programs
[0]); i
++) {
5167 printf("load_init_program: attempting to load %s\n", init_programs
[i
]);
5168 error
= load_init_program_at_path(p
, (user_addr_t
)scratch_addr
, init_programs
[i
]);
5172 printf("load_init_program: failed loading %s: errno %d\n", init_programs
[i
], error
);
/* Nothing worked: fatal. Report the last attempted program and errno. */
5176 panic("Process 1 exec of %s failed, errno %d", ((i
== 0) ? "<null>" : init_programs
[i
-1]), error
);
5180 * load_return_to_errno
5182 * Description: Convert a load_return_t (Mach error) to an errno (BSD error)
5184 * Parameters: lrtn Mach error number
5186 * Returns: (int) BSD error number
5188 * EBADARCH Bad architecture
5189 * EBADMACHO Bad Mach object file
5190 * ESHLIBVERS Bad shared library version
5191 * ENOMEM Out of memory/resource shortage
5192 * EACCES Access denied
5193 * ENOENT Entry not found (usually "file does
5195 * EIO An I/O error occurred
5196 * EBADEXEC The executable is corrupt/unknown
/*
 * load_return_to_errno
 *
 * Purpose: map a Mach loader error (load_return_t) to a BSD errno.
 * NOTE(review): nearly the entire switch body is elided in this
 * extraction -- only two case labels survive; see the per-errno mapping
 * documented in the comment block preceding this function.
 */
5199 load_return_to_errno(load_return_t lrtn
)
5207 case LOAD_BADMACHO_UPX
:
5221 case LOAD_DECRYPTFAIL
:
5227 #include <mach/mach_types.h>
5228 #include <mach/vm_prot.h>
5229 #include <mach/semaphore.h>
5230 #include <mach/sync_policy.h>
5231 #include <kern/clock.h>
5232 #include <mach/kern_return.h>
5237 * Description: Allocate the block of memory used by the execve arguments.
5238 * At the same time, we allocate a page so that we can read in
5239 * the first page of the image.
5241 * Parameters: struct image_params * the image parameter block
5243 * Returns: 0 Success
5244 * EINVAL Invalid argument
5245 * EACCES Permission denied
5246 * EINTR Interrupted function
5247 * ENOMEM Not enough space
5249 * Notes: This is a temporary allocation into the kernel address space
5250 * to enable us to copy arguments in from user space. This is
5251 * necessitated by not mapping the process calling execve() into
5252 * the kernel address space during the execve() system call.
5254 * We assemble the argument and environment, etc., into this
5255 * region before copying it as a single block into the child
5256 * process address space (at the top or bottom of the stack,
5257 * depending on which way the stack grows; see the function
5258 * exec_copyout_strings() for details).
5260 * This ends up with a second (possibly unnecessary) copy compared
5261 * with assembing the data directly into the child address space,
5262 * instead, but since we cannot be guaranteed that the parent has
5263 * not modified its environment, we can't really know that it's
5264 * really a block there as well.
/*
 * State and lock helpers for the execargs buffer cache.
 * execargs_waiters counts threads blocked waiting for a free slot;
 * it is presumably guarded by execargs_cache_lock -- confirm in full source.
 */
5268 static int execargs_waiters
= 0;
5269 lck_mtx_t
*execargs_cache_lock
;
/* Spin-acquire the cache mutex (short critical sections). */
5272 execargs_lock_lock(void) {
5273 lck_mtx_lock_spin(execargs_cache_lock
);
5277 execargs_lock_unlock(void) {
5278 lck_mtx_unlock(execargs_cache_lock
);
/* Atomically drop the cache lock and sleep on execargs_free_count;
 * interruptible, so callers must check the wait result. */
5281 static wait_result_t
5282 execargs_lock_sleep(void) {
5283 return(lck_mtx_sleep(execargs_cache_lock
, LCK_SLEEP_DEFAULT
, &execargs_free_count
, THREAD_INTERRUPTIBLE
));
/*
 * Purgeable-memory helpers for the execargs cache. Regions live in
 * bsd_pageable_map and are created purgeable so the VM system can reclaim
 * cached-but-unused buffers under pressure:
 *  - allocate:   create a new purgeable region of BSD_PAGEABLE_SIZE_PER_EXEC
 *  - reference:  mark a region nonvolatile (in use; contents must persist)
 *  - volatilize: mark a region volatile (cacheable; contents reclaimable)
 * NOTE(review): the 'return kr;' lines and some declarations are elided in
 * this extraction.
 */
5286 static kern_return_t
5287 execargs_purgeable_allocate(char **execarg_address
) {
5288 kern_return_t kr
= vm_allocate_kernel(bsd_pageable_map
, (vm_offset_t
*)execarg_address
, BSD_PAGEABLE_SIZE_PER_EXEC
, VM_FLAGS_ANYWHERE
| VM_FLAGS_PURGABLE
, VM_KERN_MEMORY_NONE
);
5289 assert(kr
== KERN_SUCCESS
);
5293 static kern_return_t
5294 execargs_purgeable_reference(void *execarg_address
) {
5295 int state
= VM_PURGABLE_NONVOLATILE
;
5296 kern_return_t kr
= vm_purgable_control(bsd_pageable_map
, (vm_offset_t
) execarg_address
, VM_PURGABLE_SET_STATE
, &state
);
5298 assert(kr
== KERN_SUCCESS
);
5302 static kern_return_t
5303 execargs_purgeable_volatilize(void *execarg_address
) {
5304 int state
= VM_PURGABLE_VOLATILE
| VM_PURGABLE_ORDERING_OBSOLETE
;
5306 kr
= vm_purgable_control(bsd_pageable_map
, (vm_offset_t
) execarg_address
, VM_PURGABLE_SET_STATE
, &state
);
5308 assert(kr
== KERN_SUCCESS
);
/* Wake all threads sleeping on execargs_free_count (see execargs_lock_sleep). */
5314 execargs_wakeup_waiters(void) {
5315 thread_wakeup(&execargs_free_count
);
/*
 * execargs_alloc
 *
 * Purpose: obtain the argument/environment staging buffer for an exec.
 * Waits (interruptibly) until a slot is free, reuses a cached region if
 * one is available, otherwise allocates a fresh purgeable region; then
 * carves the region into the image_params string/header fields.
 *
 * NOTE(review): lossy extraction -- declarations of 'kret'/'res', the
 * error-return statements, and closing braces are elided.
 */
5319 execargs_alloc(struct image_params
*imgp
)
5323 int i
, cache_index
= -1;
5325 execargs_lock_lock();
/* Block until a slot is free; bail out if the sleep was interrupted. */
5327 while (execargs_free_count
== 0) {
5329 res
= execargs_lock_sleep();
5331 if (res
!= THREAD_AWAKENED
) {
5332 execargs_lock_unlock();
/* Claim a slot under the lock. */
5337 execargs_free_count
--;
/* Scan the cache for a reusable region; the elided body presumably
 * checks 'element' for non-zero and records cache_index -- confirm. */
5339 for (i
= 0; i
< execargs_cache_size
; i
++) {
5340 vm_offset_t element
= execargs_cache
[i
];
5343 imgp
->ip_strings
= (char *)(execargs_cache
[i
]);
5344 execargs_cache
[i
] = 0;
5349 assert(execargs_free_count
>= 0);
5351 execargs_lock_unlock();
/* Cache miss: allocate a fresh purgeable region.
 * Cache hit: re-reference (make nonvolatile) the cached region. */
5353 if (cache_index
== -1) {
5354 kret
= execargs_purgeable_allocate(&imgp
->ip_strings
);
5357 kret
= execargs_purgeable_reference(imgp
->ip_strings
);
5359 assert(kret
== KERN_SUCCESS
);
5360 if (kret
!= KERN_SUCCESS
) {
5364 /* last page used to read in file headers */
/* Region layout: NCARGS + PAGE_SIZE bytes of string space, followed by
 * the header page (ip_vdata) used to read in the image's first page. */
5365 imgp
->ip_vdata
= imgp
->ip_strings
+ ( NCARGS
+ PAGE_SIZE
);
5366 imgp
->ip_strendp
= imgp
->ip_strings
;
5367 imgp
->ip_argspace
= NCARGS
;
5368 imgp
->ip_strspace
= ( NCARGS
+ PAGE_SIZE
);
5376 * Description: Free the block of memory used by the execve arguments and the
5377 * first page of the executable by a previous call to the function
5380 * Parameters: struct image_params * the image parameter block
5382 * Returns: 0 Success
5383 * EINVAL Invalid argument
5384 * EINTR Oeration interrupted
/*
 * execargs_free
 *
 * Purpose: release the exec argument staging buffer: mark the region
 * volatile (reclaimable under memory pressure), return it to the cache,
 * and wake one or more waiters if any thread is blocked in
 * execargs_alloc(). Returns 0 on success, EINVAL if volatilization failed.
 *
 * NOTE(review): lossy extraction -- declarations of 'kret'/'i', the empty
 * slot check in the cache loop, and closing braces are elided.
 */
5387 execargs_free(struct image_params
*imgp
)
5391 boolean_t needs_wakeup
= FALSE
;
/* Let the VM reclaim the buffer contents while it sits in the cache. */
5393 kret
= execargs_purgeable_volatilize(imgp
->ip_strings
);
5395 execargs_lock_lock();
5396 execargs_free_count
++;
/* Return the region to the first free cache slot (the elided loop body
 * presumably tests 'element' == 0 -- confirm in full source). */
5398 for (i
= 0; i
< execargs_cache_size
; i
++) {
5399 vm_offset_t element
= execargs_cache
[i
];
5401 execargs_cache
[i
] = (vm_offset_t
) imgp
->ip_strings
;
5402 imgp
->ip_strings
= NULL
;
5407 assert(imgp
->ip_strings
== NULL
);
/* Decide under the lock whether a wakeup is needed... */
5409 if (execargs_waiters
> 0)
5410 needs_wakeup
= TRUE
;
5412 execargs_lock_unlock();
/* ...but perform it after dropping the lock. */
5414 if (needs_wakeup
== TRUE
)
5415 execargs_wakeup_waiters();
5417 return ((kret
== KERN_SUCCESS
? 0 : EINVAL
));
/*
 * exec_resettextvp
 *
 * Purpose: switch the process's text vnode/offset (p_textvp/p_textoff) to
 * the newly exec'ed image, taking a reference on the new vnode and --
 * in elided code -- releasing the reference on the previous one (tvp).
 *
 * NOTE(review): lossy extraction -- declarations of 'vp'/'ret'/'offset',
 * the locking around the proc field updates, the vnode_rele() on the old
 * vnode, and closing braces are elided. Confirm details in full source.
 */
5421 exec_resettextvp(proc_t p
, struct image_params
*imgp
)
/* Remember the old text vnode so its reference can be dropped below. */
5425 vnode_t tvp
= p
->p_textvp
;
5429 offset
= imgp
->ip_arch_offset
;
5432 panic("exec_resettextvp: expected valid vp");
/* Take a persistent reference on the new image's vnode. */
5434 ret
= vnode_ref(vp
);
5438 p
->p_textoff
= offset
;
5440 p
->p_textvp
= NULLVP
; /* this is paranoia */
/* Drop the old text vnode's reference, if there was one. */
5445 if ( tvp
!= NULLVP
) {
5446 if (vnode_getwithref(tvp
) == 0) {
/*
 * cdhash_to_string
 *
 * Purpose: render a CS_CDHASH_LEN-byte code-directory hash as a
 * NUL-terminated lowercase hex string into the caller-provided buffer
 * of CS_CDHASH_STRING_SIZE bytes.
 */
5454 // Includes the 0-byte (therefore "SIZE" instead of "LEN").
5455 static const size_t CS_CDHASH_STRING_SIZE
= CS_CDHASH_LEN
* 2 + 1;
5457 static void cdhash_to_string(char str
[CS_CDHASH_STRING_SIZE
], uint8_t const * const cdhash
) {
5458 static char const nibble
[] = "0123456789abcdef";
5460 /* Apparently still the safest way to get a hex representation
5462 * xnu's printf routines have %*D/%20D in theory, but "not really", see:
5463 * <rdar://problem/33328859> confusion around %*D/%nD in printf
/* High nibble then low nibble, two characters per input byte. */
5465 for (int i
= 0; i
< CS_CDHASH_LEN
; ++i
) {
5466 str
[i
*2] = nibble
[(cdhash
[i
] & 0xf0) >> 4];
5467 str
[i
*2+1] = nibble
[cdhash
[i
] & 0x0f];
/* Terminate the string. */
5469 str
[CS_CDHASH_STRING_SIZE
- 1] = 0;
5473 * If the process is not signed or if it contains entitlements, we
5474 * need to communicate through the task_access_port to taskgated.
5476 * taskgated will provide a detached code signature if present, and
5477 * will enforce any restrictions on entitlements.
/*
 * taskgated_required
 *
 * Purpose: decide whether the exec must call out to the taskgated daemon
 * (see the comment block preceding this function): platform binaries can
 * skip it; processes without a valid signature skip it (success not
 * required); binaries carrying entitlements require a successful upcall
 * on desktop builds unless booted single-user, with initproc exempted.
 * *require_success reports whether a failed upcall is fatal to the exec.
 *
 * NOTE(review): lossy extraction -- declarations of 'error'/'blob'/
 * 'length', the return statements, and closing braces are elided.
 */
5481 taskgated_required(proc_t p
, boolean_t
*require_success
)
5488 csvnode_print_debug(p
->p_textvp
);
/* Desktop additionally requires the binary to live at a platform path
 * for the skip; embedded only checks the platform-binary bit. */
5490 #if !CONFIG_EMBEDDED
5491 const int can_skip_taskgated
= csproc_get_platform_binary(p
) && !csproc_get_platform_path(p
);
5493 const int can_skip_taskgated
= csproc_get_platform_binary(p
);
5495 if (can_skip_taskgated
) {
5496 if (cs_debug
) printf("taskgated not required for: %s\n", p
->p_name
);
5497 *require_success
= FALSE
;
/* Unsigned (or invalidated) processes: no upcall needed. */
5501 if ((p
->p_csflags
& CS_VALID
) == 0) {
5502 *require_success
= FALSE
;
/* Entitlements present? Then taskgated gets a say. */
5506 error
= cs_entitlements_blob_get(p
, &blob
, &length
);
5507 if (error
== 0 && blob
!= NULL
) {
5508 #if !CONFIG_EMBEDDED
5510 * fatal on the desktop when entitlements are present,
5511 * unless we started in single-user mode
5513 if ((boothowto
& RB_SINGLE
) == 0)
5514 *require_success
= TRUE
;
5516 * Allow initproc to run without causing taskgated to launch
5518 if (p
== initproc
) {
5519 *require_success
= FALSE
;
5524 if (cs_debug
) printf("taskgated required for: %s\n", p
->p_name
);
/* Default: upcall not required, and failure is not fatal. */
5529 *require_success
= FALSE
;
5534 * __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__
5536 * Description: Waits for the userspace daemon to respond to the request
5537 * we made. Function declared non inline to be visible in
5538 * stackshots and spindumps as well as debugging.
/*
 * Thin, deliberately non-inlined wrapper around find_code_signature() so
 * that time spent blocked on the taskgated upcall is identifiable by name
 * in stackshots and spindumps (see the comment block preceding it).
 */
5540 __attribute__((noinline
)) int
5541 __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port
, int32_t new_pid
)
5543 return find_code_signature(task_access_port
, new_pid
);
/*
 * check_for_signature
 *
 * Purpose: finalize code-signing state for a successfully activated image:
 * adopt the new image's cs flags, enable map switch-protection for
 * CS_HARD/CS_KILL processes, surface any MAC or code-signing error
 * recorded during activation, call out to taskgated when required, mark
 * the process CS_VALID from its cdhash, optionally log platform-binary
 * execution, and -- on any failure path -- SIGKILL the process with an
 * os_reason describing why.
 *
 * NOTE(review): lossy extraction -- 'error' declaration, several goto/
 * label lines, the switch on the taskgated return value, and closing
 * braces are elided between the embedded line numbers.
 */
5547 check_for_signature(proc_t p
, struct image_params
*imgp
)
5549 mach_port_t port
= NULL
;
5550 kern_return_t kr
= KERN_FAILURE
;
5552 boolean_t unexpected_failure
= FALSE
;
5553 unsigned char hash
[CS_CDHASH_LEN
];
5554 boolean_t require_success
= FALSE
;
5555 int spawn
= (imgp
->ip_flags
& IMGPF_SPAWN
);
5556 int vfexec
= (imgp
->ip_flags
& IMGPF_VFORK_EXEC
);
5557 os_reason_t signature_failure_reason
= OS_REASON_NULL
;
5560 * Override inherited code signing flags with the
5561 * ones for the process that is being successfully
/* Adopt the code-signing flags computed during image activation. */
5565 p
->p_csflags
= imgp
->ip_csflags
;
5568 /* Set the switch_protect flag on the map */
5569 if(p
->p_csflags
& (CS_HARD
|CS_KILL
)) {
5570 vm_map_switch_protect(get_task_map(p
->task
), TRUE
);
5574 * image activation may be failed due to policy
5575 * which is unexpected but security framework does not
5576 * approve of exec, kill and return immediately.
5578 if (imgp
->ip_mac_return
!= 0) {
5580 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
5581 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_SECURITY_POLICY
, 0, 0);
5582 signature_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_SECURITY_POLICY
);
5583 error
= imgp
->ip_mac_return
;
5584 unexpected_failure
= TRUE
;
/* A specific code-signing exit reason recorded during activation is
 * taken over here (ownership moves to signature_failure_reason). */
5588 if (imgp
->ip_cs_error
!= OS_REASON_NULL
) {
5589 signature_failure_reason
= imgp
->ip_cs_error
;
5590 imgp
->ip_cs_error
= OS_REASON_NULL
;
5595 /* check if callout to taskgated is needed */
5596 if (!taskgated_required(p
, &require_success
)) {
/* Obtain the task access (taskgated) port; a missing/invalid port is
 * fatal only when taskgated_required() demanded success. */
5601 kr
= task_get_task_access_port(p
->task
, &port
);
5602 if (KERN_SUCCESS
!= kr
|| !IPC_PORT_VALID(port
)) {
5604 if (require_success
) {
5605 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
5606 p
->p_pid
, OS_REASON_CODESIGNING
, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT
, 0, 0);
5607 signature_failure_reason
= os_reason_create(OS_REASON_CODESIGNING
, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT
);
5614 * taskgated returns KERN_SUCCESS if it has completed its work
5615 * and the exec should continue, KERN_FAILURE if the exec should
5616 * fail, or it may error out with different error code in an
5617 * event of mig failure (e.g. process was signalled during the
5618 * rpc call, taskgated died, mig server died etc.).
5621 kr
= __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(port
, p
->p_pid
);
/* KERN_FAILURE path (per the comment above): invalid signature. */
5629 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
5630 p
->p_pid
, OS_REASON_CODESIGNING
, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG
, 0, 0);
5631 signature_failure_reason
= os_reason_create(OS_REASON_CODESIGNING
, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG
);
/* Any other taskgated/MIG error: unexpected failure. */
5636 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
5637 p
->p_pid
, OS_REASON_EXEC
, EXEC_EXIT_REASON_TASKGATED_OTHER
, 0, 0);
5638 signature_failure_reason
= os_reason_create(OS_REASON_EXEC
, EXEC_EXIT_REASON_TASKGATED_OTHER
);
5639 unexpected_failure
= TRUE
;
5643 /* Only do this if exec_resettextvp() did not fail */
5644 if (p
->p_textvp
!= NULLVP
) {
5646 * If there's a new code directory, mark this process
/* Having a cdhash for the text vnode/offset marks the process valid. */
5649 if (0 == ubc_cs_getcdhash(p
->p_textvp
, p
->p_textoff
, hash
)) {
5651 p
->p_csflags
|= CS_VALID
;
5658 /* The process's code signature related properties are
5659 * fully set up, so this is an opportune moment to log
5660 * platform binary execution, if desired. */
5661 if (platform_exec_logging
!= 0 && csproc_get_platform_binary(p
)) {
5662 uint8_t cdhash
[CS_CDHASH_LEN
];
5663 char cdhash_string
[CS_CDHASH_STRING_SIZE
];
5664 proc_getcdhash(p
, cdhash
);
5665 cdhash_to_string(cdhash_string
, cdhash
);
5667 os_log(peLog
, "CS Platform Exec Logging: Executing platform signed binary "
5668 "'%s' with cdhash %s\n", p
->p_name
, cdhash_string
);
/* Failure path: record CS_KILLED for genuine signature failures only. */
5671 if (!unexpected_failure
)
5672 p
->p_csflags
|= CS_KILLED
;
5673 /* make very sure execution fails */
/* vfork/posix_spawn children are signalled via the new thread; ownership
 * of signature_failure_reason transfers to the psignal call. */
5674 if (vfexec
|| spawn
) {
5675 assert(signature_failure_reason
!= OS_REASON_NULL
);
5676 psignal_vfork_with_reason(p
, p
->task
, imgp
->ip_new_thread
,
5677 SIGKILL
, signature_failure_reason
);
5678 signature_failure_reason
= OS_REASON_NULL
;
5681 assert(signature_failure_reason
!= OS_REASON_NULL
);
5682 psignal_with_reason(p
, SIGKILL
, signature_failure_reason
);
5683 signature_failure_reason
= OS_REASON_NULL
;
5687 /* If we hit this, we likely would have leaked an exit reason */
5688 assert(signature_failure_reason
== OS_REASON_NULL
);
5693 * Typically as soon as we start executing this process, the
5694 * first instruction will trigger a VM fault to bring the text
5695 * pages (as executable) into the address space, followed soon
5696 * thereafter by dyld data structures (for dynamic executable).
5697 * To optimize this, as well as improve support for hardware
5698 * debuggers that can only access resident pages present
5699 * in the process' page tables, we prefault some pages if
5700 * possible. Errors are non-fatal.
5702 static void exec_prefault_data(proc_t p __unused
, struct image_params
*imgp
, load_result_t
*load_result
)
5705 size_t expected_all_image_infos_size
;
5708 * Prefault executable or dyld entry point.
5710 vm_fault(current_map(),
5711 vm_map_trunc_page(load_result
->entry_point
,
5712 vm_map_page_mask(current_map())),
5713 VM_PROT_READ
| VM_PROT_EXECUTE
,
5714 FALSE
, VM_KERN_MEMORY_NONE
,
5715 THREAD_UNINT
, NULL
, 0);
5717 if (imgp
->ip_flags
& IMGPF_IS_64BIT
) {
5718 expected_all_image_infos_size
= sizeof(struct user64_dyld_all_image_infos
);
5720 expected_all_image_infos_size
= sizeof(struct user32_dyld_all_image_infos
);
5723 /* Decode dyld anchor structure from <mach-o/dyld_images.h> */
5724 if (load_result
->dynlinker
&&
5725 load_result
->all_image_info_addr
&&
5726 load_result
->all_image_info_size
>= expected_all_image_infos_size
) {
5728 struct user64_dyld_all_image_infos infos64
;
5729 struct user32_dyld_all_image_infos infos32
;
5733 * Pre-fault to avoid copyin() going through the trap handler
5734 * and recovery path.
5736 vm_fault(current_map(),
5737 vm_map_trunc_page(load_result
->all_image_info_addr
,
5738 vm_map_page_mask(current_map())),
5739 VM_PROT_READ
| VM_PROT_WRITE
,
5740 FALSE
, VM_KERN_MEMORY_NONE
,
5741 THREAD_UNINT
, NULL
, 0);
5742 if ((load_result
->all_image_info_addr
& PAGE_MASK
) + expected_all_image_infos_size
> PAGE_SIZE
) {
5743 /* all_image_infos straddles a page */
5744 vm_fault(current_map(),
5745 vm_map_trunc_page(load_result
->all_image_info_addr
+ expected_all_image_infos_size
- 1,
5746 vm_map_page_mask(current_map())),
5747 VM_PROT_READ
| VM_PROT_WRITE
,
5748 FALSE
, VM_KERN_MEMORY_NONE
,
5749 THREAD_UNINT
, NULL
, 0);
5752 ret
= copyin(load_result
->all_image_info_addr
,
5754 expected_all_image_infos_size
);
5755 if (ret
== 0 && all_image_infos
.infos32
.version
>= DYLD_ALL_IMAGE_INFOS_ADDRESS_MINIMUM_VERSION
) {
5757 user_addr_t notification_address
;
5758 user_addr_t dyld_image_address
;
5759 user_addr_t dyld_version_address
;
5760 user_addr_t dyld_all_image_infos_address
;
5761 user_addr_t dyld_slide_amount
;
5763 if (imgp
->ip_flags
& IMGPF_IS_64BIT
) {
5764 notification_address
= all_image_infos
.infos64
.notification
;
5765 dyld_image_address
= all_image_infos
.infos64
.dyldImageLoadAddress
;
5766 dyld_version_address
= all_image_infos
.infos64
.dyldVersion
;
5767 dyld_all_image_infos_address
= all_image_infos
.infos64
.dyldAllImageInfosAddress
;
5769 notification_address
= all_image_infos
.infos32
.notification
;
5770 dyld_image_address
= all_image_infos
.infos32
.dyldImageLoadAddress
;
5771 dyld_version_address
= all_image_infos
.infos32
.dyldVersion
;
5772 dyld_all_image_infos_address
= all_image_infos
.infos32
.dyldAllImageInfosAddress
;
5776 * dyld statically sets up the all_image_infos in its Mach-O
5777 * binary at static link time, with pointers relative to its default
5778 * load address. Since ASLR might slide dyld before its first
5779 * instruction is executed, "dyld_slide_amount" tells us how far
5780 * dyld was loaded compared to its default expected load address.
5781 * All other pointers into dyld's image should be adjusted by this
5782 * amount. At some point later, dyld will fix up pointers to take
5783 * into account the slide, at which point the all_image_infos_address
5784 * field in the structure will match the runtime load address, and
5785 * "dyld_slide_amount" will be 0, if we were to consult it again.
5788 dyld_slide_amount
= load_result
->all_image_info_addr
- dyld_all_image_infos_address
;
5791 kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
5792 (uint64_t)load_result
->all_image_info_addr
,
5793 all_image_infos
.infos32
.version
,
5794 (uint64_t)notification_address
,
5795 (uint64_t)dyld_image_address
,
5796 (uint64_t)dyld_version_address
,
5797 (uint64_t)dyld_all_image_infos_address
);
5800 vm_fault(current_map(),
5801 vm_map_trunc_page(notification_address
+ dyld_slide_amount
,
5802 vm_map_page_mask(current_map())),
5803 VM_PROT_READ
| VM_PROT_EXECUTE
,
5804 FALSE
, VM_KERN_MEMORY_NONE
,
5805 THREAD_UNINT
, NULL
, 0);
5806 vm_fault(current_map(),
5807 vm_map_trunc_page(dyld_image_address
+ dyld_slide_amount
,
5808 vm_map_page_mask(current_map())),
5809 VM_PROT_READ
| VM_PROT_EXECUTE
,
5810 FALSE
, VM_KERN_MEMORY_NONE
,
5811 THREAD_UNINT
, NULL
, 0);
5812 vm_fault(current_map(),
5813 vm_map_trunc_page(dyld_version_address
+ dyld_slide_amount
,
5814 vm_map_page_mask(current_map())),
5816 FALSE
, VM_KERN_MEMORY_NONE
,
5817 THREAD_UNINT
, NULL
, 0);
5818 vm_fault(current_map(),
5819 vm_map_trunc_page(dyld_all_image_infos_address
+ dyld_slide_amount
,
5820 vm_map_page_mask(current_map())),
5821 VM_PROT_READ
| VM_PROT_WRITE
,
5822 FALSE
, VM_KERN_MEMORY_NONE
,
5823 THREAD_UNINT
, NULL
, 0);