/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)kern_exec.c	8.1 (Berkeley) 6/10/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <machine/reg.h>
#include <machine/cpu_capabilities.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/socketvar.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/uio_internal.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/kdebug.h>
#include <sys/signal.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/shm_internal.h>		/* shmexec() */
#include <sys/ubc_internal.h>		/* ubc_map() */
#include <sys/spawn.h>
#include <sys/spawn_internal.h>
#include <sys/codesign.h>
#include <crypto/sha1.h>

#include <security/audit/audit.h>

#include <ipc/ipc_types.h>

#include <mach/mach_types.h>
#include <mach/port.h>
#include <mach/task.h>
#include <mach/task_access.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>

#include <kern/sched_prim.h>	/* thread_wakeup() */
#include <kern/affinity.h>
#include <kern/assert.h>
#include <kern/task.h>

#include <security/mac.h>
#include <security/mac_mach_internal.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <vm/vm_fault.h>

#include <kdp/kdp_dyld.h>

#include <machine/pal_routines.h>

#include <pexpert/pexpert.h>

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif /* CONFIG_MEMORYSTATUS */
/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
extern void (*dtrace_fasttrap_exec_ptr)(proc_t);
extern void (*dtrace_helpers_cleanup)(proc_t);
extern void dtrace_lazy_dofs_destroy(proc_t);

#include <sys/dtrace_ptss.h>
/* support for child creation in exec after vfork */
thread_t fork_create_child(task_t parent_task, proc_t child_proc, int inherit_memory, int is64bit);
void vfork_exit(proc_t p, int rv);
int setsigvec(proc_t, thread_t, int, struct __kern_sigaction *, boolean_t in_sigstart);
extern void proc_apply_task_networkbg_internal(proc_t, thread_t);
int task_set_cpuusage(task_t task, uint64_t percentage, uint64_t interval, uint64_t deadline, int scope);
/*
 * Mach things for which prototypes are unavailable from Mach headers
 */
void		ipc_thread_reset(
			thread_t	thread);
kern_return_t	ipc_object_copyin(
			ipc_space_t		space,
			mach_port_name_t	name,
			mach_msg_type_name_t	msgt_name,
			ipc_object_t		*objectp);
void ipc_port_release_send(ipc_port_t);

extern struct savearea *get_user_regs(thread_t);
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/ast.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <machine/vmparam.h>
#include <sys/imgact.h>
/*
 * EAI_ITERLIMIT	The maximum number of times to iterate an image
 *			activator in exec_activate_image() before treating
 *			it as malformed/corrupt.
 */
#define EAI_ITERLIMIT		10

/*
 * For #! interpreter parsing
 */
#define IS_WHITESPACE(ch) ((ch == ' ') || (ch == '\t'))
#define IS_EOL(ch) ((ch == '#') || (ch == '\n'))
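
/*
 * Illustrative example (editor's addition, not from the original source):
 * given a header line such as "#!  /bin/sh -x\n", the scan in
 * exec_shell_imgact() skips the IS_WHITESPACE() run after "#!", takes
 * "/bin/sh" as the interpreter token, collects "-x" as an interpreter
 * argument, and stops at the first character for which IS_EOL() is true
 * (the newline, or a '#' comment character).
 */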
extern vm_map_t bsd_pageable_map;
extern struct fileops vnops;

#define	ROUND_PTR(type, addr)	\
	(type *)( ( (uintptr_t)(addr) + 16 - 1) \
		  & ~(16 - 1) )
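
/*
 * Illustrative example (editor's addition): assuming the 16-byte round-up
 * completion shown above, ROUND_PTR(char, 0x1001) evaluates to
 * (char *)0x1010, i.e. the address rounded up to the next 16-byte boundary.
 */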
struct image_params;	/* Forward */
static int exec_activate_image(struct image_params *imgp);
static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp);
static int load_return_to_errno(load_return_t lrtn);
static int execargs_alloc(struct image_params *imgp);
static int execargs_free(struct image_params *imgp);
static int exec_check_permissions(struct image_params *imgp);
static int exec_extract_strings(struct image_params *imgp);
static int exec_add_apple_strings(struct image_params *imgp);
static int exec_handle_sugid(struct image_params *imgp);
static int sugid_scripts = 0;
SYSCTL_INT(_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW | CTLFLAG_LOCKED, &sugid_scripts, 0, "");
static kern_return_t create_unix_stack(vm_map_t map, load_result_t *load_result, proc_t p);
static int copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size);
static void exec_resettextvp(proc_t, struct image_params *);
static int check_for_signature(proc_t, struct image_params *);
static void exec_prefault_data(proc_t, struct image_params *, load_result_t *);
#if !CONFIG_EMBEDDED
/* Identify process during exec and opt into legacy behaviors */
struct legacy_behavior {
	uuid_t		process_uuid;
	uint32_t	legacy_mask;
};

static const struct legacy_behavior legacy_behaviors[] =
{
	{{ 0xF8, 0x7C, 0xC3, 0x67, 0xFB, 0x68, 0x37, 0x93, 0xBC, 0x34, 0xB2, 0xB6, 0x05, 0x2B, 0xCD, 0xE2 }, PROC_LEGACY_BEHAVIOR_IOTHROTTLE },
	{{ 0x0B, 0x4E, 0xDF, 0xD8, 0x76, 0xD1, 0x3D, 0x4D, 0x9D, 0xD7, 0x37, 0x43, 0x1C, 0xA8, 0xFB, 0x26 }, PROC_LEGACY_BEHAVIOR_IOTHROTTLE },
};
#endif /* !CONFIG_EMBEDDED */

/* We don't want this one exported */
int open1(vfs_context_t, struct nameidata *, int, struct vnode_attr *, int32_t *);
/*
 * exec_add_user_string
 *
 * Add the requested string to the string space area.
 *
 * Parameters:	struct image_params *	image parameter block
 *		user_addr_t		string to add to strings area
 *		int			segment from which string comes
 *		boolean_t		TRUE if string contributes to NCARGS
 *
 *		!0			Failure errno from copyinstr()
 *
 *		(imgp->ip_strendp)	updated location of next add, if any
 *		(imgp->ip_strspace)	updated byte count of space remaining
 *		(imgp->ip_argspace)	updated byte count of space in NCARGS
 */
exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolean_t is_ncargs)
			space = imgp->ip_argspace;	/* by definition smaller than ip_strspace */
			space = imgp->ip_strspace;

		if (!UIO_SEG_IS_USER_SPACE(seg)) {
			char *kstr = CAST_DOWN(char *, str);	/* SAFE */
			error = copystr(kstr, imgp->ip_strendp, space, &len);
			error = copyinstr(str, imgp->ip_strendp, space, &len);

		imgp->ip_strendp += len;
		imgp->ip_strspace -= len;
			imgp->ip_argspace -= len;

	} while (error == ENAMETOOLONG);
/*
 * exec_save_path
 *
 * To support new app package launching for Mac OS X, the dyld needs the
 * first argument to execve() stored on the user stack.
 *
 * Save the executable path name at the bottom of the strings area and set
 * the argument vector pointer to the location following that to indicate
 * the start of the argument and environment tuples, setting the remaining
 * string space count to the size of the string area minus the path length.
 *
 * Parameters:	struct image_params *	image parameter block
 *		char *			path used to invoke program
 *		int			segment from which path comes
 *
 * Returns:	int			0	Success
 *		copy[in]str:EFAULT	Bad address
 *		copy[in]str:ENAMETOOLONG	Filename too long
 *
 *		(imgp->ip_strings)	saved path
 *		(imgp->ip_strspace)	space remaining in ip_strings
 *		(imgp->ip_strendp)	start of remaining copy area
 *		(imgp->ip_argspace)	space remaining of NCARGS
 *		(imgp->ip_applec)	Initial applev[0]
 *
 * Note:	We have to do this before the initial namei() since if the
 *		path contains symbolic links, namei() will overwrite the
 *		original path buffer contents.  If the last symbolic link
 *		resolved was a relative pathname, we would lose the original
 *		"path", which could be an absolute pathname.  This might be
 *		unacceptable for dyld.
 */
exec_save_path(struct image_params *imgp, user_addr_t path, int seg)
	len = MIN(MAXPATHLEN, imgp->ip_strspace);

	case UIO_USERSPACE32:
	case UIO_USERSPACE64:	/* Same for copyin()... */
		error = copyinstr(path, imgp->ip_strings, len, &len);
		kpath = CAST_DOWN(char *, path);	/* SAFE */
		error = copystr(kpath, imgp->ip_strings, len, &len);

	imgp->ip_strendp += len;
	imgp->ip_strspace -= len;
/*
 * exec_reset_save_path
 *
 * If we detect a shell script, we need to reset the string area
 * state so that the interpreter can be saved onto the stack.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	int			0	Success
 *
 *		(imgp->ip_strings)	saved path
 *		(imgp->ip_strspace)	space remaining in ip_strings
 *		(imgp->ip_strendp)	start of remaining copy area
 *		(imgp->ip_argspace)	space remaining of NCARGS
 */
exec_reset_save_path(struct image_params *imgp)
	imgp->ip_strendp = imgp->ip_strings;
	imgp->ip_argspace = NCARGS;
	imgp->ip_strspace = ( NCARGS + PAGE_SIZE );
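
/*
 * Illustrative note (editor's addition): exec_shell_imgact() below calls
 * exec_reset_save_path() and then exec_save_path() again, so that the
 * interpreter path (e.g. "/bin/sh") replaces the script path at
 * imgp->ip_strings before the argument strings are collected.
 */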
/*
 * exec_shell_imgact
 *
 * Image activator for interpreter scripts.  If the image begins with the
 * characters "#!", then it is an interpreter script.  Verify that we are
 * not already executing in PowerPC mode, and that the length of the script
 * line indicating the interpreter is not in excess of the maximum allowed
 * size.  If this is the case, then break out the arguments, if any, which
 * are separated by white space, and copy them into the argument save area
 * as if they were provided on the command line before all other arguments.
 * The line ends when we encounter a comment character ('#') or newline.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	-1			not an interpreter (keep looking)
 *		-3			Success: interpreter: relookup
 *		>0			Failure: interpreter: error number
 *
 * A return value other than -1 indicates subsequent image activators should
 * not be given the opportunity to attempt to activate the image.
 */
exec_shell_imgact(struct image_params *imgp)
	char *vdata = imgp->ip_vdata;
	char *line_startp, *line_endp;

	/*
	 * Make sure it's a shell script.  If we've already redirected
	 * from an interpreted file once, don't do it again.
	 *
	 * Note: We disallow PowerPC, since the expectation is that we
	 * may run a PowerPC interpreter, but not interpret a PowerPC
	 * image.  This is consistent with historical behaviour.
	 */
	if (vdata[0] != '#' ||
	    (imgp->ip_flags & IMGPF_INTERPRET) != 0) {

	imgp->ip_flags |= IMGPF_INTERPRET;
	imgp->ip_interp_sugid_fd = -1;
	imgp->ip_interp_buffer[0] = '\0';

	/* Check to see if SUGID scripts are permitted.  If they aren't then
	 * clear the SUGID bits.
	 * imgp->ip_vattr is known to be valid.
	 */
	if (sugid_scripts == 0) {
		imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID);

	/* Try to find the first non-whitespace character */
	for( ihp = &vdata[2]; ihp < &vdata[IMG_SHSIZE]; ihp++ ) {
			/* Did not find interpreter, "#!\n" */
		} else if (IS_WHITESPACE(*ihp)) {
			/* Whitespace, like "#! /bin/sh\n", keep going. */
			/* Found start of interpreter */

	if (ihp == &vdata[IMG_SHSIZE]) {
		/* All whitespace, like "#! " */

	/* Try to find the end of the interpreter+args string */
	for ( ; ihp < &vdata[IMG_SHSIZE]; ihp++ ) {
			/* Still part of interpreter or args */

	if (ihp == &vdata[IMG_SHSIZE]) {
		/* A long line, like "#! blah blah blah" without end */

	/* Backtrack until we find the last non-whitespace */
	while (IS_EOL(*ihp) || IS_WHITESPACE(*ihp)) {

	/* The character after the last non-whitespace is our logical end of line */

	/*
	 * Now we have pointers to the usable part of:
	 *
	 *	"#! /usr/bin/int first    second    third    \n"
	 *	    ^ line_startp                        ^ line_endp
	 */

	/* copy the interpreter name */
	interp = imgp->ip_interp_buffer;
	for ( ihp = line_startp; (ihp < line_endp) && !IS_WHITESPACE(*ihp); ihp++)

	exec_reset_save_path(imgp);
	exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_buffer),
					UIO_SYSSPACE);

	/* Copy the entire interpreter + args for later processing into argv[] */
	interp = imgp->ip_interp_buffer;
	for ( ihp = line_startp; (ihp < line_endp); ihp++)

	/*
	 * If we have a SUID or SGID script, create a file descriptor
	 * from the vnode and pass /dev/fd/%d instead of the actual
	 * path name so that the script does not get opened twice
	 */
	if (imgp->ip_origvattr->va_mode & (VSUID | VSGID)) {
		p = vfs_context_proc(imgp->ip_vfs_context);
		error = falloc(p, &fp, &fd, imgp->ip_vfs_context);

		fp->f_fglob->fg_flag = FREAD;
		fp->f_fglob->fg_type = DTYPE_VNODE;
		fp->f_fglob->fg_ops = &vnops;
		fp->f_fglob->fg_data = (caddr_t)imgp->ip_vp;

		procfdtbl_releasefd(p, fd, NULL);
		fp_drop(p, fd, fp, 1);
		vnode_ref(imgp->ip_vp);

		imgp->ip_interp_sugid_fd = fd;
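
/*
 * Illustrative example (editor's addition): for a script whose first line is
 * "#! /bin/sh -x" and which is invoked as "script foo", the interpreter and
 * its argument are prepended ahead of the original arguments, so the new
 * image is /bin/sh and the argument vector becomes roughly
 * { "/bin/sh", "-x", <script path>, "foo" }; for a set-id script the
 * <script path> is replaced by "/dev/fd/%d" using the descriptor saved in
 * imgp->ip_interp_sugid_fd above.
 */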
/*
 * exec_fat_imgact
 *
 * Image activator for fat 1.0 binaries.  If the binary is fat, then we
 * need to select an image from it internally, and make that the image
 * we are going to attempt to execute.  At present, this consists of
 * reloading the first page for the image with a first page from the
 * offset location indicated by the fat header.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	-1			not a fat binary (keep looking)
 *		-2			Success: encapsulated binary: reread
 *		>0			Failure: error number
 *
 * Important:	This image activator is byte order neutral.
 *
 * Note:	A return value other than -1 indicates subsequent image
 *		activators should not be given the opportunity to attempt
 *		to activate the image.
 *
 *		If we find an encapsulated binary, we make no assertions
 *		about its validity; instead, we leave that up to a rescan
 *		for an activator to claim it, and, if it is claimed by one,
 *		that activator is responsible for determining validity.
 */
exec_fat_imgact(struct image_params *imgp)
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	kauth_cred_t cred = kauth_cred_proc_ref(p);
	struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata;
	struct _posix_spawnattr *psa = NULL;
	struct fat_arch fat_arch;

	/* Make sure it's a fat binary */
	if ((fat_header->magic != FAT_MAGIC) &&
	    (fat_header->magic != FAT_CIGAM)) {

	/* If posix_spawn binprefs exist, respect those prefs. */
	psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
	if (psa != NULL && psa->psa_binprefs[0] != 0) {
		struct fat_arch *arches = (struct fat_arch *) (fat_header + 1);
		int nfat_arch = 0, pr = 0, f = 0;

		nfat_arch = OSSwapBigToHostInt32(fat_header->nfat_arch);
		/* Check each preference listed against all arches in header */
		for (pr = 0; pr < NBINPREFS; pr++) {
			cpu_type_t pref = psa->psa_binprefs[pr];
				/* No suitable arch in the pref list */

			if (pref == CPU_TYPE_ANY) {
				/* Fall through to regular grading */

			for (f = 0; f < nfat_arch; f++) {
				cpu_type_t archtype = OSSwapBigToHostInt32(
						arches[f].cputype);
				cpu_type_t archsubtype = OSSwapBigToHostInt32(
						arches[f].cpusubtype) & ~CPU_SUBTYPE_MASK;
				if (pref == archtype &&
				    grade_binary(archtype, archsubtype)) {
					/* We have a winner! */
					fat_arch.cputype = archtype;
					fat_arch.cpusubtype = archsubtype;
					fat_arch.offset = OSSwapBigToHostInt32(
							arches[f].offset);
					fat_arch.size = OSSwapBigToHostInt32(
							arches[f].size);
					fat_arch.align = OSSwapBigToHostInt32(
							arches[f].align);

	/* Look up our preferred architecture in the fat file. */
	lret = fatfile_getarch_affinity(imgp->ip_vp,
			(vm_offset_t)fat_header,
			&fat_arch,
			(p->p_flag & P_AFFINITY));
	if (lret != LOAD_SUCCESS) {
		error = load_return_to_errno(lret);

	/* Read the Mach-O header out of fat_arch */
	error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata,
			PAGE_SIZE, fat_arch.offset,
			UIO_SYSSPACE, (IO_UNIT|IO_NODELOCKED),
			cred, &resid, p);

	/* Did we read a complete header? */

	/* Success.  Indicate we have identified an encapsulated binary */
	imgp->ip_arch_offset = (user_size_t)fat_arch.offset;
	imgp->ip_arch_size = (user_size_t)fat_arch.size;

	kauth_cred_unref(&cred);
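
/*
 * Illustrative sketch (editor's addition): the on-disk layout consulted
 * above is the standard <mach-o/fat.h> one, stored big-endian -- a
 * struct fat_header { uint32_t magic; uint32_t nfat_arch; } immediately
 * followed by nfat_arch struct fat_arch entries, each carrying cputype,
 * cpusubtype, offset, size and align; hence the OSSwapBigToHostInt32()
 * conversions and the (fat_header + 1) pointer arithmetic for "arches".
 */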
/*
 * exec_mach_imgact
 *
 * Image activator for mach-o 1.0 binaries.
 *
 * Parameters:	struct image_params *	image parameter block
 *
 * Returns:	-1			not a Mach-O binary (keep looking)
 *		-2			Success: encapsulated binary: reread
 *		>0			Failure: error number
 *		EBADARCH		Mach-o binary, but with an unrecognized
 *					architecture
 *		ENOMEM			No memory for child process after -
 *					can only happen after vfork()
 *
 * Important:	This image activator is NOT byte order neutral.
 *
 * Note:	A return value other than -1 indicates subsequent image
 *		activators should not be given the opportunity to attempt
 *		to activate the image.
 *
 * TODO:	More gracefully handle failures after vfork
 */
exec_mach_imgact(struct image_params *imgp)
	struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	task_t new_task = NULL;	/* protected by vfexec */
	struct uthread *uthread;
	vm_map_t old_map = VM_MAP_NULL;
	load_result_t load_result;
	struct _posix_spawnattr *psa = NULL;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);

	/*
	 * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
	 * is a reserved field on the end, so for the most part, we can
	 * treat them as if they were identical.  Reverse-endian Mach-O
	 * binaries are recognized but not compatible.
	 */
	if ((mach_header->magic == MH_CIGAM) ||
	    (mach_header->magic == MH_CIGAM_64)) {

	if ((mach_header->magic != MH_MAGIC) &&
	    (mach_header->magic != MH_MAGIC_64)) {

	switch (mach_header->filetype) {

	if (!imgp->ip_origcputype) {
		imgp->ip_origcputype = mach_header->cputype;
		imgp->ip_origcpusubtype = mach_header->cpusubtype;

	task = current_task();
	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	/*
	 * Save off the vfexec state up front; we have to do this, because
	 * we need to know if we were in this state initially subsequent to
	 * creating the backing task, thread, and uthread for the child
	 * process (from the vfs_context_t in img_parms).
	 */
	if (uthread->uu_flag & UT_VFORK)
		vfexec = 1;	/* Mark in exec */

	if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64)
		imgp->ip_flags |= IMGPF_IS_64BIT;

	/* If posix_spawn binprefs exist, respect those prefs. */
	psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
	if (psa != NULL && psa->psa_binprefs[0] != 0) {
		for (pr = 0; pr < NBINPREFS; pr++) {
			cpu_type_t pref = psa->psa_binprefs[pr];
				/* No suitable arch in the pref list */

			if (pref == CPU_TYPE_ANY) {
				/* Jump to regular grading */

			if (pref == imgp->ip_origcputype) {
				/* We have a match! */

	if (!grade_binary(imgp->ip_origcputype & ~CPU_SUBTYPE_LIB64,
			imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK)) {

	/* Copy in arguments/environment from the old process */
	error = exec_extract_strings(imgp);

	error = exec_add_apple_strings(imgp);

	AUDIT_ARG(argv, imgp->ip_startargv, imgp->ip_argc,
	    imgp->ip_endargv - imgp->ip_startargv);
	AUDIT_ARG(envv, imgp->ip_endargv, imgp->ip_envc,
	    imgp->ip_endenvv - imgp->ip_endargv);
	/*
	 * We are being called to activate an image subsequent to a vfork()
	 * operation; in this case, we know that our task, thread, and
	 * uthread are actually those of our parent, and our proc, which we
	 * obtained indirectly from the image_params vfs_context_t, is the
	 * child.
	 */
	if (vfexec || spawn) {
		imgp->ip_new_thread = fork_create_child(task, p, FALSE, (imgp->ip_flags & IMGPF_IS_64BIT));
		if (imgp->ip_new_thread == NULL) {

		/* reset local idea of thread, uthread, task */
		thread = imgp->ip_new_thread;
		uthread = get_bsdthread_info(thread);
		task = new_task = get_threadtask(thread);
		map = get_task_map(task);

	/*
	 * We set these flags here; this is OK, since if we fail after
	 * this point, we have already destroyed the parent process anyway.
	 */
	task_set_dyld_info(task, MACH_VM_MIN_ADDRESS, 0);
	if (imgp->ip_flags & IMGPF_IS_64BIT) {
		task_set_64bit(task, TRUE);
		OSBitOrAtomic(P_LP64, &p->p_flag);
		task_set_64bit(task, FALSE);
		OSBitAndAtomic(~((uint32_t)P_LP64), &p->p_flag);

	/*
	 * Load the Mach-O file.
	 *
	 * NOTE: An error after this point indicates we have potentially
	 * destroyed or overwritten some process state while attempting an
	 * execve() following a vfork(), which is an unrecoverable condition.
	 */

	/*
	 * Actually load the image file we previously decided to load.
	 */
	lret = load_machfile(imgp, mach_header, thread, map, &load_result);

	if (lret != LOAD_SUCCESS) {
		error = load_return_to_errno(lret);

	vm_map_set_user_wire_limit(get_task_map(task), p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	/*
	 * Set code-signing flags if this binary is signed, or if parent has
	 * requested them on exec.
	 */
	if (load_result.csflags & CS_VALID) {
		imgp->ip_csflags |= load_result.csflags &
			(CS_VALID|
			CS_HARD|CS_KILL|CS_EXEC_SET_HARD|CS_EXEC_SET_KILL);

		imgp->ip_csflags &= ~CS_VALID;

	if (p->p_csflags & CS_EXEC_SET_HARD)
		imgp->ip_csflags |= CS_HARD;
	if (p->p_csflags & CS_EXEC_SET_KILL)
		imgp->ip_csflags |= CS_KILL;

	/*
	 * Set up the system reserved areas in the new address space.
	 */
	vm_map_exec(get_task_map(task),
			(void *) p->p_fd->fd_rdir,

	/*
	 * Close file descriptors which specify close-on-exec.
	 */
	fdexec(p, psa != NULL ? psa->psa_flags : 0);

	/*
	 * deal with set[ug]id.
	 */
	error = exec_handle_sugid(imgp);

	/* Make sure we won't interrupt ourselves signalling a partial process */
	if (!vfexec && !spawn && (p->p_lflag & P_LTRACED))

	if (load_result.unixproc &&
	    create_unix_stack(get_task_map(task),
			&load_result,
			p) != KERN_SUCCESS) {
		error = load_return_to_errno(LOAD_NOSPACE);

	if (vfexec || spawn) {
		old_map = vm_map_switch(get_task_map(task));

	if (load_result.unixproc) {
		/*
		 * Copy the strings area out into the new process address
		 * space.
		 */
		error = exec_copyout_strings(imgp, &ap);

			vm_map_switch(old_map);

		thread_setuserstack(thread, ap);

	if (load_result.dynlinker) {
		int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4;

		/* Adjust the stack */
		ap = thread_adjuserstack(thread, -new_ptr_size);
		error = copyoutptr(load_result.mach_header, ap, new_ptr_size);

			vm_map_switch(old_map);

	task_set_dyld_info(task, load_result.all_image_info_addr,
	    load_result.all_image_info_size);

	/* Avoid immediate VM faults back into kernel */
	exec_prefault_data(p, imgp, &load_result);

	if (vfexec || spawn) {
		vm_map_switch(old_map);

	/* Set the entry point */
	thread_setentrypoint(thread, load_result.entry_point);
	/*
	 * Reset signal state.
	 */

	/*
	 * need to cancel async IO requests that can be cancelled and wait for those
	 * already active.  MAY BLOCK!
	 */

	/* FIXME: Till vmspace inherit is fixed: */
	if (!vfexec && p->vm_shm)

	/* Clean up the semaphores */

	/*
	 * Remember file name for accounting.
	 */
	p->p_acflag &= ~AFORK;
	/*
	 * If the translated name isn't NULL, then we want to use
	 * that translated name as the name we show as the "real" name.
	 * Otherwise, use the name passed into exec.
	 */
	if (0 != imgp->ip_p_comm[0]) {
		bcopy((caddr_t)imgp->ip_p_comm, (caddr_t)p->p_comm,

		if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN)
			imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
		bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
			(unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
		p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';

	pal_dbg_set_task_name( p->task );

	memcpy(&p->p_uuid[0], &load_result.uuid[0], sizeof(p->p_uuid));

#if !CONFIG_EMBEDDED
	if (!vfexec && !spawn) {
		if (p->p_legacy_behavior & PROC_LEGACY_BEHAVIOR_IOTHROTTLE) {
			throttle_legacy_process_decr();

	p->p_legacy_behavior = 0;
	for (i = 0; i < sizeof(legacy_behaviors)/sizeof(legacy_behaviors[0]); i++) {
		if (0 == uuid_compare(legacy_behaviors[i].process_uuid, p->p_uuid)) {
			p->p_legacy_behavior = legacy_behaviors[i].legacy_mask;

	if (p->p_legacy_behavior & PROC_LEGACY_BEHAVIOR_IOTHROTTLE) {
		throttle_legacy_process_incr();

	// <rdar://6598155> dtrace code cleanup needed
	/*
	 * Invalidate any predicate evaluation already cached for this thread by DTrace.
	 * That's because we've just stored to p_comm and DTrace refers to that when it
	 * evaluates the "execname" special variable.  uid and gid may have changed as well.
	 */
	dtrace_set_thread_predcache(current_thread(), 0);

	/*
	 * Free any outstanding lazy dof entries.  It is imperative we
	 * always call dtrace_lazy_dofs_destroy, rather than null check
	 * and call if !NULL.  If we NULL test, during lazy dof faulting
	 * we can race with the faulting code and proceed from here to
	 * beyond the helpers cleanup.  The lazy dof faulting will then
	 * install new helpers which no longer belong to this process!
	 */
	dtrace_lazy_dofs_destroy(p);

	/*
	 * Clean up any DTrace helpers for the process.
	 */
	if (p->p_dtrace_helpers != NULL && dtrace_helpers_cleanup) {
		(*dtrace_helpers_cleanup)(p);

	/*
	 * Cleanup the DTrace provider associated with this process.
	 */
	if (p->p_dtrace_probes && dtrace_fasttrap_exec_ptr) {
		(*dtrace_fasttrap_exec_ptr)(p);

	if (kdebug_enable) {
		long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		/*
		 * Collect the pathname for tracing
		 */
		kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		if (vfexec || spawn) {
			KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
					p->p_pid ,0,0,0, (uintptr_t)thread_tid(thread));
			KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
					dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (uintptr_t)thread_tid(thread));
			KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
			KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
					dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);

	/*
	 * Ensure the 'translated' and 'affinity' flags are cleared, since we
	 * no longer run PowerPC binaries.
	 */
	OSBitAndAtomic(~((uint32_t)(P_TRANSLATED | P_AFFINITY)), &p->p_flag);

	/*
	 * If posix_spawned with the START_SUSPENDED flag, stop the
	 * process before it runs.
	 */
	if (imgp->ip_px_sa != NULL) {
		psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
		if (psa->psa_flags & POSIX_SPAWN_START_SUSPENDED) {
			(void) task_suspend(p->task);

		if ((psa->psa_flags & POSIX_SPAWN_IOS_RESV1_APP_START) || (psa->psa_flags & POSIX_SPAWN_IOS_APPLE_DAEMON_START) || (psa->psa_flags & POSIX_SPAWN_IOS_APP_START)) {
			if ((psa->psa_flags & POSIX_SPAWN_IOS_RESV1_APP_START))
				apptype = PROC_POLICY_IOS_RESV1_APPTYPE;
			else if (psa->psa_flags & POSIX_SPAWN_IOS_APPLE_DAEMON_START)
				apptype = PROC_POLICY_IOS_APPLE_DAEMON;
			else if (psa->psa_flags & POSIX_SPAWN_IOS_APP_START)
				apptype = PROC_POLICY_IOS_APPTYPE;
				apptype = PROC_POLICY_OSX_APPTYPE_NONE;
			proc_set_task_apptype(p->task, apptype, imgp->ip_new_thread);
			if (apptype == PROC_POLICY_IOS_RESV1_APPTYPE)
				proc_apply_task_networkbg_internal(p, NULL);

		if (psa->psa_apptype & POSIX_SPAWN_APPTYPE_IOS_APPLEDAEMON) {
			apptype = PROC_POLICY_IOS_APPLE_DAEMON;
			proc_set_task_apptype(p->task, apptype, imgp->ip_new_thread);
#else /* CONFIG_EMBEDDED */
		if ((psa->psa_flags & POSIX_SPAWN_OSX_TALAPP_START) || (psa->psa_flags & POSIX_SPAWN_OSX_DBCLIENT_START)) {
			if ((psa->psa_flags & POSIX_SPAWN_OSX_TALAPP_START))
				apptype = PROC_POLICY_OSX_APPTYPE_TAL;
			else if (psa->psa_flags & POSIX_SPAWN_OSX_DBCLIENT_START)
				apptype = PROC_POLICY_OSX_APPTYPE_DBCLIENT;
				apptype = PROC_POLICY_OSX_APPTYPE_NONE;
			proc_set_task_apptype(p->task, apptype, NULL);
			if ((apptype == PROC_POLICY_OSX_APPTYPE_TAL) ||
			    (apptype == PROC_POLICY_OSX_APPTYPE_DBCLIENT)) {
				proc_apply_task_networkbg_internal(p, NULL);

		if ((psa->psa_apptype & POSIX_SPAWN_APPTYPE_OSX_TAL) ||
		    (psa->psa_apptype & POSIX_SPAWN_APPTYPE_OSX_WIDGET)) {
			if ((psa->psa_apptype & POSIX_SPAWN_APPTYPE_OSX_TAL))
				apptype = PROC_POLICY_OSX_APPTYPE_TAL;
			else if (psa->psa_apptype & POSIX_SPAWN_APPTYPE_OSX_WIDGET)
				apptype = PROC_POLICY_OSX_APPTYPE_DBCLIENT;
				apptype = PROC_POLICY_OSX_APPTYPE_NONE;
			proc_set_task_apptype(p->task, apptype, imgp->ip_new_thread);
			if ((apptype == PROC_POLICY_OSX_APPTYPE_TAL) ||
			    (apptype == PROC_POLICY_OSX_APPTYPE_DBCLIENT)) {
				proc_apply_task_networkbg_internal(p, NULL);
#endif /* CONFIG_EMBEDDED */

	/*
	 * mark as execed, wakeup the process that vforked (if any) and tell
	 * it that it now has its own resources back
	 */
	OSBitOrAtomic(P_EXEC, &p->p_flag);
	proc_resetregister(p);
	if (p->p_pptr && (p->p_lflag & P_LPPWAIT)) {
		p->p_lflag &= ~P_LPPWAIT;
		wakeup((caddr_t)p->p_pptr);

	/*
	 * Pay for our earlier safety; deliver the delayed signals from
	 * the incomplete vfexec process now that it's complete.
	 */
	if (vfexec && (p->p_lflag & P_LTRACED)) {
		psignal_vfork(p, new_task, thread, SIGTRAP);

	proc_knote(p, NOTE_EXEC);

	if (vfexec || spawn) {
		task_deallocate(new_task);
		thread_deallocate(thread);
/*
 * Our image activator table; this is the table of the image types we are
 * capable of loading.  We list them in order of preference to ensure the
 * fastest image load speed.
 *
 * XXX hardcoded, for now; should use linker sets
 */
struct execsw {
	int (*ex_imgact)(struct image_params *);
	const char *ex_name;
} execsw[] = {
	{ exec_mach_imgact,		"Mach-o Binary" },
	{ exec_fat_imgact,		"Fat Binary" },
	{ exec_shell_imgact,		"Interpreter Script" },
	{ NULL, NULL }
};
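
/*
 * Hypothetical illustration (editor's addition): because the table is walked
 * in order by exec_activate_image(), a new image format would be wired in by
 * inserting an entry such as
 *
 *	{ exec_foo_imgact,		"Foo Binary" },
 *
 * ahead of the interpreter-script entry; exec_foo_imgact is a made-up name
 * following the same int (*)(struct image_params *) signature.
 */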
/*
 * exec_activate_image
 *
 * Description:	Iterate through the available image activators, and activate
 *		the image associated with the imgp structure.  We start with
 *		the activator for Mach-o binaries, followed by those for Fat
 *		binaries and Interpreter scripts.
 *
 * Parameters:	struct image_params *	Image parameter block
 *
 * Returns:	0			Success
 *		EBADEXEC		The executable is corrupt/unknown
 *		execargs_alloc:EINVAL	Invalid argument
 *		execargs_alloc:EACCES	Permission denied
 *		execargs_alloc:EINTR	Interrupted function
 *		execargs_alloc:ENOMEM	Not enough space
 *		exec_save_path:EFAULT	Bad address
 *		exec_save_path:ENAMETOOLONG	Filename too long
 *		exec_check_permissions:EACCES	Permission denied
 *		exec_check_permissions:ENOEXEC	Executable file format error
 *		exec_check_permissions:ETXTBSY	Text file busy [misuse of error code]
 *		exec_check_permissions:???
 *		vn_rdwr:???		[anything vn_rdwr can return]
 *		<ex_imgact>:???		[anything an imgact can return]
 */
exec_activate_image(struct image_params *imgp)
	struct nameidata nd;
	int once = 1;	/* save SGUID-ness for interpreted files */
	int iterlimit = EAI_ITERLIMIT;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);

	error = execargs_alloc(imgp);

	error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg);

	/* Use imgp->ip_strings, which contains the copyin-ed exec path */
	DTRACE_PROC1(exec, uintptr_t, imgp->ip_strings);

	NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
		UIO_SYSSPACE, CAST_USER_ADDR_T(imgp->ip_strings), imgp->ip_vfs_context);

	imgp->ip_ndp = &nd;	/* successful namei(); call nameidone() later */
	imgp->ip_vp = nd.ni_vp;	/* if set, need to vnode_put() at some point */

	/*
	 * Before we start the transition from binary A to binary B, make
	 * sure another thread hasn't started exiting the process.  We grab
	 * the proc lock to check p_lflag initially, and the transition
	 * mechanism ensures that the value doesn't change after we release
	 * the lock.
	 */
	if (p->p_lflag & P_LEXIT) {

	error = proc_transstart(p, 1);

	error = exec_check_permissions(imgp);

	/* Copy; avoid invocation of an interpreter overwriting the original */
	*imgp->ip_origvattr = *imgp->ip_vattr;

	error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, PAGE_SIZE, 0,
			UIO_SYSSPACE, IO_NODELOCKED,
			vfs_context_ucred(imgp->ip_vfs_context),
			&resid, vfs_context_proc(imgp->ip_vfs_context));

encapsulated_binary:
	/* Limit the number of iterations we will attempt on each binary */
	if (--iterlimit == 0) {

	for(i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) {
		error = (*execsw[i].ex_imgact)(imgp);

		/* case -1: not claimed: continue */
		case -2:		/* Encapsulated binary */
			goto encapsulated_binary;

		case -3:		/* Interpreter */
			/*
			 * Copy the script label for later use.  Note that
			 * the label can be different when the script is
			 * actually read by the interpreter.
			 */
			if (imgp->ip_scriptlabelp)
				mac_vnode_label_free(imgp->ip_scriptlabelp);
			imgp->ip_scriptlabelp = mac_vnode_label_alloc();
			if (imgp->ip_scriptlabelp == NULL) {

			mac_vnode_label_copy(imgp->ip_vp->v_label,
					imgp->ip_scriptlabelp);

			vnode_put(imgp->ip_vp);
			imgp->ip_vp = NULL;	/* already put */
			imgp->ip_ndp = NULL;	/* already nameidone */

			/* Use imgp->ip_strings, which exec_shell_imgact reset to the interpreter */
			NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF,
				UIO_SYSSPACE, CAST_USER_ADDR_T(imgp->ip_strings), imgp->ip_vfs_context);

	proc_transend(p, 0);

	/*
	 * Call out to allow 3rd party notification of exec.
	 * Ignore result of kauth_authorize_fileop call.
	 */
	if (error == 0 && kauth_authorize_fileop_has_listeners()) {
		kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context),
			(uintptr_t)nd.ni_vp, 0);

	proc_transend(p, 0);

	if (imgp->ip_strings)
		execargs_free(imgp);

	nameidone(imgp->ip_ndp);
/*
 * exec_handle_port_actions
 *
 * Description:	Go through the _posix_port_actions_t contents,
 *		calling task_set_special_port, task_set_exception_ports
 *		and/or audit_session_spawnjoin for the current task.
 *
 * Parameters:	struct image_params *	Image parameter block
 *		short psa_flags		posix spawn attribute flags
 *
 * Returns:	0			Success
 *		ENOTSUP			Illegal posix_spawn attr flag was set
 */
exec_handle_port_actions(struct image_params *imgp, short psa_flags)
	_posix_spawn_port_actions_t pacts = imgp->ip_px_spa;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	_ps_port_action_t *act = NULL;
	task_t task = p->task;
	ipc_port_t port = NULL;

	for (i = 0; i < pacts->pspa_count; i++) {
		act = &pacts->pspa_actions[i];

		if (ipc_object_copyin(get_task_ipcspace(current_task()),
			act->new_port, MACH_MSG_TYPE_COPY_SEND,
			(ipc_object_t *) &port) != KERN_SUCCESS)

		switch (act->port_type) {
			/* Only allowed when not under vfork */
			if (!(psa_flags & POSIX_SPAWN_SETEXEC))
			else if (task_set_special_port(task,
					act->which, port) != KERN_SUCCESS)

		case PSPA_EXCEPTION:
			/* Only allowed when not under vfork */
			if (!(psa_flags & POSIX_SPAWN_SETEXEC))
			else if (task_set_exception_ports(task,
					act->mask, port, act->behavior,
					act->flavor) != KERN_SUCCESS)

		case PSPA_AU_SESSION:
			ret = audit_session_spawnjoin(p, port);

		/* action failed, so release port resources */
		ipc_port_release_send(port);
/*
 * exec_handle_file_actions
 *
 * Description:	Go through the _posix_file_actions_t contents applying the
 *		open, close, and dup2 operations to the open file table for
 *		the current process.
 *
 * Parameters:	struct image_params *	Image parameter block
 *
 * Returns:	0			Success
 *
 * Note:	Actions are applied in the order specified, with the credential
 *		of the parent process.  This is done to permit the parent
 *		process to utilize POSIX_SPAWN_RESETIDS to drop privilege in
 *		the child following operations the child may in fact not be
 *		normally permitted to perform.
 */
exec_handle_file_actions(struct image_params *imgp, short psa_flags)
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	_posix_spawn_file_actions_t px_sfap = imgp->ip_px_sfa;
	int ival[2];		/* dummy retval for system calls */

	for (action = 0; action < px_sfap->psfa_act_count; action++) {
		_psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];

		switch (psfa->psfaa_type) {
			/*
			 * Open is different, in that it requires the use of
			 * a path argument, which is normally copied in from
			 * user space; because of this, we have to support an
			 * open from kernel space that passes an address space
			 * context of UIO_SYSSPACE, and casts the address
			 * argument to a user_addr_t.
			 */
			struct vnode_attr va;
			struct nameidata nd;
			int mode = psfa->psfaa_openargs.psfao_mode;
			struct dup2_args dup2a;
			struct close_nocancel_args ca;

			/* Mask off all but regular access permissions */
			mode = ((mode &~ p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT;
			VATTR_SET(&va, va_mode, mode & ACCESSPERMS);

			NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
				CAST_USER_ADDR_T(psfa->psfaa_openargs.psfao_path),
				imgp->ip_vfs_context);

			error = open1(imgp->ip_vfs_context,
					psfa->psfaa_openargs.psfao_oflag,

			/*
			 * If there's an error, or we get the right fd by
			 * accident, then drop out here.  This is easier than
			 * reworking all the open code to preallocate fd
			 * slots, and internally taking one as an argument.
			 */
			if (error || ival[0] == psfa->psfaa_filedes)

			/*
			 * If we didn't fall out from an error, we ended up
			 * with the wrong fd; so now we've got to try to dup2
			 * it to the right one.
			 */
			dup2a.from = origfd;
			dup2a.to = psfa->psfaa_filedes;

			/*
			 * The dup2() system call implementation sets
			 * ival to newfd in the success case, but we
			 * can ignore that, since if we didn't get the
			 * fd we wanted, the error will stop us.
			 */
			error = dup2(p, &dup2a, ival);

			/*
			 * Finally, close the original fd.
			 */
			error = close_nocancel(p, &ca, ival);

			struct dup2_args dup2a;

			dup2a.from = psfa->psfaa_filedes;
			dup2a.to = psfa->psfaa_openargs.psfao_oflag;

			/*
			 * The dup2() system call implementation sets
			 * ival to newfd in the success case, but we
			 * can ignore that, since if we didn't get the
			 * fd we wanted, the error will stop us.
			 */
			error = dup2(p, &dup2a, ival);

			struct close_nocancel_args ca;

			ca.fd = psfa->psfaa_filedes;

			error = close_nocancel(p, &ca, ival);

		case PSFA_INHERIT: {
			struct fileproc *fp;
			int fd = psfa->psfaa_filedes;

			/*
			 * Check to see if the descriptor exists, and
			 * ensure it's -not- marked as close-on-exec.
			 * [Less code than the equivalent F_GETFD/F_SETFD.]
			 */
			if ((error = fp_lookup(p, fd, &fp, 1)) == 0) {
				*fdflags(p, fd) &= ~UF_EXCLOSE;
				(void) fp_drop(p, fd, fp, 1);

		/* All file actions failures are considered fatal, per POSIX */

	if (error != 0 || (psa_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) == 0)

	/*
	 * If POSIX_SPAWN_CLOEXEC_DEFAULT is set, behave (during
	 * this spawn only) as if "close on exec" is the default
	 * disposition of all pre-existing file descriptors.  In this case,
	 * the list of file descriptors mentioned in the file actions
	 * are the only ones that can be inherited, so mark them now.
	 *
	 * The actual closing part comes later, in fdexec().
	 */
	for (action = 0; action < px_sfap->psfa_act_count; action++) {
		_psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
		int fd = psfa->psfaa_filedes;

		switch (psfa->psfaa_type) {
			fd = psfa->psfaa_openargs.psfao_oflag;

		*fdflags(p, fd) |= UF_INHERIT;
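
/*
 * Hedged userland sketch (editor's addition, not part of this file): the
 * PSFA_OPEN, PSFA_DUP2 and PSFA_CLOSE records handled above are what libc
 * builds for calls such as
 *
 *	posix_spawn_file_actions_t fa;
 *	posix_spawn_file_actions_init(&fa);
 *	posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO, "/tmp/out",
 *	    O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	posix_spawn_file_actions_adddup2(&fa, STDOUT_FILENO, STDERR_FILENO);
 *	posix_spawn_file_actions_addclose(&fa, STDIN_FILENO);
 *
 * Each action is then replayed here, in order, with the parent's credential.
 */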
/*
 * posix_spawn
 *
 * Parameters:	uap->pid		Pointer to pid return area
 *		uap->fname		File name to exec
 *		uap->argp		Argument list
 *		uap->envp		Environment list
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *		ENOTSUP			Not supported
 *		ENOEXEC			Executable file format error
 *		exec_activate_image:EINVAL	Invalid argument
 *		exec_activate_image:EACCES	Permission denied
 *		exec_activate_image:EINTR	Interrupted function
 *		exec_activate_image:ENOMEM	Not enough space
 *		exec_activate_image:EFAULT	Bad address
 *		exec_activate_image:ENAMETOOLONG	Filename too long
 *		exec_activate_image:ENOEXEC	Executable file format error
 *		exec_activate_image:ETXTBSY	Text file busy [misuse of error code]
 *		exec_activate_image:EBADEXEC	The executable is corrupt/unknown
 *		exec_activate_image:???
 *		mac_execve_enter:???
 *
 * TODO:	Expect to need __mac_posix_spawn() at some point...
 *		Handle posix_spawnattr_t
 *		Handle posix_spawn_file_actions_t
 */
posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval)
	proc_t p = ap;	/* quiet bogus GCC vfork() warning */
	user_addr_t pid = uap->pid;
	int ival[2];		/* dummy retval for setpgid() */
	struct image_params *imgp;
	struct vnode_attr *vap;
	struct vnode_attr *origvap;
	struct uthread *uthread = 0;	/* compiler complains if not set to 0 */
	char alt_p_comm[sizeof(p->p_comm)] = {0};	/* for PowerPC */
	int is_64 = IS_64BIT_PROCESS(p);
	struct vfs_context context;
	struct user__posix_spawn_args_desc px_args;
	struct _posix_spawnattr px_sa;
	_posix_spawn_file_actions_t px_sfap = NULL;
	_posix_spawn_port_actions_t px_spap = NULL;
	struct __kern_sigaction vec;
	boolean_t spawn_no_exec = FALSE;
	boolean_t proc_transit_set = TRUE;
	boolean_t exec_done = FALSE;

	/*
	 * Allocate a big chunk for locals instead of using stack since these
	 * structures are pretty big.
	 */
	MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO);
	imgp = (struct image_params *) bufp;

	vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
	origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));

	/* Initialize the common data in the image_params structure */
	imgp->ip_user_fname = uap->path;
	imgp->ip_user_argv = uap->argv;
	imgp->ip_user_envv = uap->envp;
	imgp->ip_vattr = vap;
	imgp->ip_origvattr = origvap;
	imgp->ip_vfs_context = &context;
	imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT : IMGPF_NONE);
	imgp->ip_p_comm = alt_p_comm;		/* for PowerPC */
	imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);

	if (uap->adesc != USER_ADDR_NULL) {
		error = copyin(uap->adesc, &px_args, sizeof(px_args));
			struct user32__posix_spawn_args_desc px_args32;

			error = copyin(uap->adesc, &px_args32, sizeof(px_args32));

			/*
			 * Convert arguments descriptor from external 32 bit
			 * representation to internal 64 bit representation
			 */
			px_args.attr_size = px_args32.attr_size;
			px_args.attrp = CAST_USER_ADDR_T(px_args32.attrp);
			px_args.file_actions_size = px_args32.file_actions_size;
			px_args.file_actions = CAST_USER_ADDR_T(px_args32.file_actions);
			px_args.port_actions_size = px_args32.port_actions_size;
			px_args.port_actions = CAST_USER_ADDR_T(px_args32.port_actions);
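
		/*
		 * Hedged userland sketch (editor's addition, not part of this
		 * file): the descriptor copied in above is what libc passes
		 * when a caller does, e.g.,
		 *
		 *	posix_spawnattr_t attr;
		 *	posix_spawnattr_init(&attr);
		 *	posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETPGROUP);
		 *	posix_spawn(&child, "/bin/ls", NULL, &attr, argv, envp);
		 *
		 * attr_size/attrp, file_actions_size/file_actions and
		 * port_actions_size/port_actions describe the three
		 * variable-size blobs that the code below copies in and
		 * validates; "child" is a hypothetical pid_t variable.
		 */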
	if (px_args.attr_size != 0) {
		/*
		 * This could lose some of the port_actions pointer,
		 * but we already have it from px_args.
		 */
		if ((error = copyin(px_args.attrp, &px_sa, sizeof(px_sa))) != 0)

		imgp->ip_px_sa = &px_sa;

	if (px_args.file_actions_size != 0) {
		/* Limit file_actions to allowed number of open files */
		int maxfa = (p->p_limit ? p->p_rlimit[RLIMIT_NOFILE].rlim_cur : NOFILE);

		if (px_args.file_actions_size < PSF_ACTIONS_SIZE(1) ||
		    px_args.file_actions_size > PSF_ACTIONS_SIZE(maxfa)) {

		MALLOC(px_sfap, _posix_spawn_file_actions_t, px_args.file_actions_size, M_TEMP, M_WAITOK);
		if (px_sfap == NULL) {

		imgp->ip_px_sfa = px_sfap;

		if ((error = copyin(px_args.file_actions, px_sfap,
				px_args.file_actions_size)) != 0)

	if (px_args.port_actions_size != 0) {
		/* Limit port_actions to one page of data */
		if (px_args.port_actions_size < PS_PORT_ACTIONS_SIZE(1) ||
		    px_args.port_actions_size > PAGE_SIZE) {

		MALLOC(px_spap, _posix_spawn_port_actions_t,
			px_args.port_actions_size, M_TEMP, M_WAITOK);
		if (px_spap == NULL) {

		imgp->ip_px_spa = px_spap;

		if ((error = copyin(px_args.port_actions, px_spap,
				px_args.port_actions_size)) != 0)

	/* set uthread to parent */
	uthread = get_bsdthread_info(current_thread());

	/*
	 * <rdar://6640530>; this does not result in a behaviour change
	 * relative to Leopard, so there should not be any existing code
	 * which depends on it.
	 */
	if (uthread->uu_flag & UT_VFORK) {

	/*
	 * If we don't have the extension flag that turns "posix_spawn()"
	 * into "execve() with options", then we will be creating a new
	 * process which does not inherit memory from the parent process,
	 * which is one of the most expensive things about using fork()
	 * and execve().
	 */
	if (imgp->ip_px_sa == NULL || !(px_sa.psa_flags & POSIX_SPAWN_SETEXEC)){
		if ((error = fork1(p, &imgp->ip_new_thread, PROC_CREATE_SPAWN)) != 0)

		imgp->ip_flags |= IMGPF_SPAWN;	/* spawn w/o exec */
		spawn_no_exec = TRUE;		/* used in later tests */

		p = (proc_t)get_bsdthreadtask_info(imgp->ip_new_thread);

	/* By default, the thread everyone plays with is the parent */
	context.vc_thread = current_thread();
	context.vc_ucred = p->p_ucred;	/* XXX must NOT be kauth_cred_get() */

	/*
	 * However, if we're not in the setexec case, redirect the context
	 * to the newly created process instead
	 */
		context.vc_thread = imgp->ip_new_thread;
	/*
	 * Post fdcopy(), pre exec_handle_sugid() - this is where we want
	 * to handle the file_actions.  Since vfork() also ends up setting
	 * us into the parent process group, and saved off the signal flags,
	 * this is also where we want to handle the spawn flags.
	 */

	/* Has spawn file actions? */
	if (imgp->ip_px_sfa != NULL) {
		/*
		 * The POSIX_SPAWN_CLOEXEC_DEFAULT flag
		 * is handled in exec_handle_file_actions().
		 */
		if ((error = exec_handle_file_actions(imgp,
				imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0)) != 0)

	/* Has spawn port actions? */
	if (imgp->ip_px_spa != NULL) {
		/*
		 * The check for the POSIX_SPAWN_SETEXEC flag is done in
		 * exec_handle_port_actions().
		 */
		if ((error = exec_handle_port_actions(imgp,
				imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0)) != 0)

	/* Has spawn attr? */
	if (imgp->ip_px_sa != NULL) {
		/*
		 * Set the process group ID of the child process; this has
		 * to happen before the image activation.
		 */
		if (px_sa.psa_flags & POSIX_SPAWN_SETPGROUP) {
			struct setpgid_args spga;
			spga.pid = p->p_pid;
			spga.pgid = px_sa.psa_pgroup;
			/*
			 * Effectively, call setpgid() system call; works
			 * because there are no pointer arguments.
			 */
			if ((error = setpgid(p, &spga, ival)) != 0)

		/*
		 * Reset UID/GID to parent's RUID/RGID; this works only
		 * because the operation occurs *after* the vfork() and
		 * before the call to exec_handle_sugid() by the image
		 * activator called from exec_activate_image().  POSIX
		 * requires that any setuid/setgid bits on the process
		 * image will take precedence over the spawn attributes
		 *
		 * The use of p_ucred is safe, since we are acting on the
		 * new process, and it has no threads other than the one
		 * we are creating for it.
		 */
		if (px_sa.psa_flags & POSIX_SPAWN_RESETIDS) {
			kauth_cred_t my_cred = p->p_ucred;
			kauth_cred_t my_new_cred = kauth_cred_setuidgid(my_cred, kauth_cred_getruid(my_cred), kauth_cred_getrgid(my_cred));
			if (my_new_cred != my_cred) {
				p->p_ucred = my_new_cred;
				/* update cred on proc */
				PROC_UPDATE_CREDS_ONPROC(p);

		/*
		 * Disable ASLR for the spawned process.
		 */
		if (px_sa.psa_flags & _POSIX_SPAWN_DISABLE_ASLR)
			OSBitOrAtomic(P_DISABLE_ASLR, &p->p_flag);

		/*
		 * Allow execution from data pages for the spawned process
		 * even if it would otherwise be disallowed by the
		 * architecture default.
		 */
		if (px_sa.psa_flags & _POSIX_SPAWN_ALLOW_DATA_EXEC)
			imgp->ip_flags |= IMGPF_ALLOW_DATA_EXEC;

	/*
	 * Disable ASLR during image activation.  This occurs either if the
	 * _POSIX_SPAWN_DISABLE_ASLR attribute was found above or if
	 * P_DISABLE_ASLR was inherited from the parent process.
	 */
	if (p->p_flag & P_DISABLE_ASLR)
		imgp->ip_flags |= IMGPF_DISABLE_ASLR;

	/*
	 * Clear transition flag so we won't hang if exec_activate_image() causes
	 * an automount (and launchd does a proc sysctl to service it).
	 *
	 * <rdar://problem/6848672>, <rdar://problem/5959568>.
	 */
	if (spawn_no_exec) {
		proc_transend(p, 0);
		proc_transit_set = 0;

#if MAC_SPAWN	/* XXX */
	if (uap->mac_p != USER_ADDR_NULL) {
		error = mac_execve_enter(uap->mac_p, imgp);

	/*
	 * Activate the image
	 */
	error = exec_activate_image(imgp);

		/* process completed the exec */
	} else if (error == -1) {
		/* Image not claimed by any activator? */

	/*
	 * If we have a spawn attr, and it contains signal related flags,
	 * then we need to process them in the "context" of the new child
	 * process, so we have to process it following image activation,
	 * prior to making the thread runnable in user space.  This is
	 * necessitated by some signal information being per-thread rather
	 * than per-process, and we don't have the new allocation in hand
	 * until after the image is activated.
	 */
	if (!error && imgp->ip_px_sa != NULL) {
		thread_t child_thread = current_thread();
		uthread_t child_uthread = uthread;

		/*
		 * If we created a new child thread, then the thread and
		 * uthread are different than the current ones; otherwise,
		 * we leave them, since we are in the exec case instead.
		 */
		if (spawn_no_exec) {
			child_thread = imgp->ip_new_thread;
			child_uthread = get_bsdthread_info(child_thread);

		/*
		 * Mask a list of signals, instead of them being unmasked, if
		 * they were unmasked in the parent; note that some signals
		 * are not maskable.
		 */
		if (px_sa.psa_flags & POSIX_SPAWN_SETSIGMASK)
			child_uthread->uu_sigmask = (px_sa.psa_sigmask & ~sigcantmask);
		/*
		 * Default a list of signals instead of ignoring them, if
		 * they were ignored in the parent.  Note that we pass
		 * spawn_no_exec to setsigvec() to indicate that we called
		 * fork1() and therefore do not need to call proc_signalstart()
		 * internally.
		 */
		if (px_sa.psa_flags & POSIX_SPAWN_SETSIGDEF) {
			vec.sa_handler = SIG_DFL;

			for (sig = 0; sig < NSIG; sig++)
				if (px_sa.psa_sigdefault & (1 << sig)) {
					error = setsigvec(p, child_thread, sig + 1, &vec, spawn_no_exec);

		/*
		 * Activate the CPU usage monitor, if requested.  This is done via a task-wide, per-thread CPU
		 * usage limit, which will generate a resource exceeded exception if any one thread exceeds the
		 * limit.
		 *
		 * Userland gives us interval in seconds, and the kernel SPI expects nanoseconds.
		 */
		if (px_sa.psa_cpumonitor_percent != 0) {
			error = proc_set_task_ruse_cpu(p->task,
					TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
					px_sa.psa_cpumonitor_percent,
					px_sa.psa_cpumonitor_interval * NSEC_PER_SEC,
2036 /* reset delay idle sleep status if set */
2037 #if !CONFIG_EMBEDDED
2038 if ((p
->p_flag
& P_DELAYIDLESLEEP
) == P_DELAYIDLESLEEP
)
2039 OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP
), &p
->p_flag
);
2040 #endif /* !CONFIG_EMBEDDED */
2041 /* upon successful spawn, re/set the proc control state */
2042 if (imgp
->ip_px_sa
!= NULL
) {
2043 switch (px_sa
.psa_pcontrol
) {
2044 case POSIX_SPAWN_PCONTROL_THROTTLE
:
2045 p
->p_pcaction
= P_PCTHROTTLE
;
2047 case POSIX_SPAWN_PCONTROL_SUSPEND
:
2048 p
->p_pcaction
= P_PCSUSP
;
2050 case POSIX_SPAWN_PCONTROL_KILL
:
2051 p
->p_pcaction
= P_PCKILL
;
2053 case POSIX_SPAWN_PCONTROL_NONE
:
2058 #if !CONFIG_EMBEDDED
2059 if ((px_sa
.psa_apptype
& POSIX_SPAWN_APPTYPE_DELAYIDLESLEEP
) != 0)
2060 OSBitOrAtomic(P_DELAYIDLESLEEP
, &p
->p_flag
);
2061 #endif /* !CONFIG_EMBEDDED */
2063 exec_resettextvp(p
, imgp
);
2066 /* Has jetsam attributes? */
2067 if (imgp
->ip_px_sa
!= NULL
) {
2068 memorystatus_list_change((px_sa
.psa_jetsam_flags
& POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY
),
2069 p
->p_pid
, px_sa
.psa_priority
, -1, px_sa
.psa_high_water_mark
);
	/*
	 * If we successfully called fork1(), we always need to do this;
	 * we identify this case by noting the IMGPF_SPAWN flag. This is
	 * because we come back from that call with signals blocked in the
	 * child, and we have to unblock them, but we want to wait until
	 * after we've performed any spawn actions. This has to happen
	 * before check_for_signature(), which uses psignal.
	 */
	if (spawn_no_exec) {
		if (proc_transit_set)
			proc_transend(p, 0);

		/*
		 * Drop the signal lock on the child which was taken on our
		 * behalf by forkproc()/cloneproc() to prevent signals being
		 * received by the child in a partially constructed state.
		 */
		proc_signalend(p, 0);

		/* flag the 'fork' has occurred */
		proc_knote(p->p_pptr, NOTE_FORK | p->p_pid);
		/* then flag exec has occurred */
		proc_knote(p, NOTE_EXEC);
		DTRACE_PROC1(create, proc_t, p);
	}

	/*
	 * We have to delay operations which might throw a signal until after
	 * the signals have been unblocked; however, we want that to happen
	 * after exec_resettextvp() so that the textvp is correct when they
	 */
	error = check_for_signature(p, imgp);

	/*
	 * Pay for our earlier safety; deliver the delayed signals from
	 * the incomplete spawn process now that it's complete.
	 */
	if (imgp != NULL && spawn_no_exec && (p->p_lflag & P_LTRACED)) {
		psignal_vfork(p, p->task, imgp->ip_new_thread, SIGTRAP);
	}
	vnode_put(imgp->ip_vp);
	if (imgp->ip_strings)
		execargs_free(imgp);
	if (imgp->ip_px_sfa != NULL)
		FREE(imgp->ip_px_sfa, M_TEMP);
	if (imgp->ip_px_spa != NULL)
		FREE(imgp->ip_px_spa, M_TEMP);

	if (imgp->ip_execlabelp)
		mac_cred_label_free(imgp->ip_execlabelp);
	if (imgp->ip_scriptlabelp)
		mac_vnode_label_free(imgp->ip_scriptlabelp);
		DTRACE_PROC1(exec__failure, int, error);

		/*
		 * <rdar://6609474> temporary - so dtrace call to current_proc()
		 * returns the child process instead of the parent.
		 */
		if (imgp != NULL && imgp->ip_flags & IMGPF_SPAWN) {
			p->p_lflag |= P_LINVFORK;
			p->p_vforkact = current_thread();
			uthread->uu_proc = p;
			uthread->uu_flag |= UT_VFORK;
		}

		DTRACE_PROC(exec__success);

		/*
		 * <rdar://6609474> temporary - so dtrace call to current_proc()
		 * returns the child process instead of the parent.
		 */
		if (imgp != NULL && imgp->ip_flags & IMGPF_SPAWN) {
			p->p_lflag &= ~P_LINVFORK;
			p->p_vforkact = NULL;
			uthread->uu_proc = PROC_NULL;
			uthread->uu_flag &= ~UT_VFORK;
		}
	/* Return to both the parent and the child? */
	if (imgp != NULL && spawn_no_exec) {
		/*
		 * If the parent wants the pid, copy it out
		 */
		if (pid != USER_ADDR_NULL)
			(void)suword(pid, p->p_pid);

		/*
		 * If we had an error, perform an internal reap; this is
		 * entirely safe, as we have a real process backing us.
		 */
		p->p_listflag |= P_LIST_DEADPARENT;

		/* make sure no one else has killed it off... */
		if (p->p_stat != SZOMB && p->exit_thread == NULL) {
			p->exit_thread = current_thread();
			exit1(p, 1, (int *)NULL);
			if (exec_done == FALSE) {
				task_deallocate(get_threadtask(imgp->ip_new_thread));
				thread_deallocate(imgp->ip_new_thread);
			}
		} else {
			/* someone is doing it for us; just skip it */
		}

		/*
		 * "Return" to the child
		 *
		 * Note: the image activator earlier dropped the
		 * task/thread references to the newly spawned
		 * process; this is OK, since we still have suspended
		 * queue references on them, so we should be fine
		 * with the delayed resume of the thread here.
		 */
		(void)thread_resume(imgp->ip_new_thread);
	}
/*
 * Parameters:	uap->fname		File name to exec
 *		uap->argp		Argument list
 *		uap->envp		Environment list
 *
 * Returns:	0			Success
 *	__mac_execve:EINVAL		Invalid argument
 *	__mac_execve:ENOTSUP		Not supported
 *	__mac_execve:EACCES		Permission denied
 *	__mac_execve:EINTR		Interrupted function
 *	__mac_execve:ENOMEM		Not enough space
 *	__mac_execve:EFAULT		Bad address
 *	__mac_execve:ENAMETOOLONG	Filename too long
 *	__mac_execve:ENOEXEC		Executable file format error
 *	__mac_execve:ETXTBSY		Text file busy [misuse of error code]
 *
 * TODO:	Dynamic linker header address on stack is copied via suword()
 */
int
execve(proc_t p, struct execve_args *uap, int32_t *retval)
{
	struct __mac_execve_args muap;
	int err;

	muap.fname = uap->fname;
	muap.argp = uap->argp;
	muap.envp = uap->envp;
	muap.mac_p = USER_ADDR_NULL;
	err = __mac_execve(p, &muap, retval);

	return (err);
}
/*
 * Parameters:	uap->fname		File name to exec
 *		uap->argp		Argument list
 *		uap->envp		Environment list
 *		uap->mac_p		MAC label supplied by caller
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *		ENOTSUP			Not supported
 *		ENOEXEC			Executable file format error
 *	exec_activate_image:EINVAL	Invalid argument
 *	exec_activate_image:EACCES	Permission denied
 *	exec_activate_image:EINTR	Interrupted function
 *	exec_activate_image:ENOMEM	Not enough space
 *	exec_activate_image:EFAULT	Bad address
 *	exec_activate_image:ENAMETOOLONG	Filename too long
 *	exec_activate_image:ENOEXEC	Executable file format error
 *	exec_activate_image:ETXTBSY	Text file busy [misuse of error code]
 *	exec_activate_image:EBADEXEC	The executable is corrupt/unknown
 *	exec_activate_image:???
 *	mac_execve_enter:???
 *
 * TODO:	Dynamic linker header address on stack is copied via suword()
 */
int
__mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval)
{
	char *bufp = NULL;
	struct image_params *imgp;
	struct vnode_attr *vap;
	struct vnode_attr *origvap;
	int error;
	char alt_p_comm[sizeof(p->p_comm)] = {0};	/* for PowerPC */
	int is_64 = IS_64BIT_PROCESS(p);
	struct vfs_context context;

	context.vc_thread = current_thread();
	context.vc_ucred = kauth_cred_proc_ref(p);	/* XXX must NOT be kauth_cred_get() */

	/* Allocate a big chunk for locals instead of using stack since these
	 * structures are pretty big.
	 */
	MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO);
	imgp = (struct image_params *) bufp;
	if (bufp == NULL) {
		/* ... */
		goto exit_with_error;
	}
	vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
	origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));

	/* Initialize the common data in the image_params structure */
	imgp->ip_user_fname = uap->fname;
	imgp->ip_user_argv = uap->argp;
	imgp->ip_user_envv = uap->envp;
	imgp->ip_vattr = vap;
	imgp->ip_origvattr = origvap;
	imgp->ip_vfs_context = &context;
	imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT : IMGPF_NONE) |
	    ((p->p_flag & P_DISABLE_ASLR) ? IMGPF_DISABLE_ASLR : IMGPF_NONE);
	imgp->ip_p_comm = alt_p_comm;		/* for PowerPC */
	imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);

	if (uap->mac_p != USER_ADDR_NULL) {
		error = mac_execve_enter(uap->mac_p, imgp);
		if (error) {
			kauth_cred_unref(&context.vc_ucred);
			goto exit_with_error;
		}
	}

	error = exec_activate_image(imgp);

	kauth_cred_unref(&context.vc_ucred);
	/* Image not claimed by any activator? */

	exec_resettextvp(p, imgp);
	error = check_for_signature(p, imgp);

	if (imgp->ip_vp != NULLVP)
		vnode_put(imgp->ip_vp);
	if (imgp->ip_strings)
		execargs_free(imgp);
	if (imgp->ip_execlabelp)
		mac_cred_label_free(imgp->ip_execlabelp);
	if (imgp->ip_scriptlabelp)
		mac_vnode_label_free(imgp->ip_scriptlabelp);

	if (!error) {
		struct uthread *uthread;

		/* Sever any extant thread affinity */
		thread_affinity_exec(current_thread());

		DTRACE_PROC(exec__success);
		uthread = get_bsdthread_info(current_thread());
		if (uthread->uu_flag & UT_VFORK) {
			vfork_return(p, retval, p->p_pid);
			(void)thread_resume(imgp->ip_new_thread);
		}
	} else {
		DTRACE_PROC1(exec__failure, int, error);
	}
/*
 * Description:	Copy a pointer in from user space to a user_addr_t in kernel
 *		space, based on 32/64 bitness of the user space
 *
 * Parameters:	froma			User space address
 *		toptr			Address of kernel space user_addr_t
 *		ptr_size		4/8, based on 'froma' address space
 *
 * Returns:	0			Success
 *		EFAULT			Bad 'froma'
 *
 * Implicit returns:
 *		*toptr			Modified
 */
static int
copyinptr(user_addr_t froma, user_addr_t *toptr, int ptr_size)
{
	int error;

	if (ptr_size == 4) {
		/* 64 bit value containing 32 bit address */
		unsigned int i;

		error = copyin(froma, &i, 4);
		*toptr = CAST_USER_ADDR_T(i);	/* SAFE */
	} else {
		error = copyin(froma, toptr, 8);
	}
	return (error);
}
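
/*
 * A minimal userland sketch (not compiled here) of the 32-vs-64-bit widening
 * that copyinptr() performs: a 4-byte pointer read from a 32-bit process is
 * zero-extended into the kernel's 64-bit user_addr_t representation. The
 * memcpy() stands in for copyin(); the example_* names are illustrative only.
 */
#if 0	/* userland illustration only */
#include <stdint.h>
#include <string.h>

typedef uint64_t example_user_addr_t;

static int
example_copyinptr(const void *froma, example_user_addr_t *toptr, int ptr_size)
{
	if (ptr_size == 4) {
		uint32_t narrow;
		memcpy(&narrow, froma, sizeof(narrow));	/* copyin(froma, &i, 4) */
		*toptr = (example_user_addr_t)narrow;	/* zero-extend, as CAST_USER_ADDR_T does */
	} else {
		memcpy(toptr, froma, sizeof(*toptr));	/* copyin(froma, toptr, 8) */
	}
	return 0;
}
#endif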
/*
 * Description:	Copy a pointer out from a user_addr_t in kernel space to
 *		user space, based on 32/64 bitness of the user space
 *
 * Parameters:	ua			User space address to copy to
 *		ptr			Address of kernel space user_addr_t
 *		ptr_size		4/8, based on 'ua' address space
 *
 * Returns:	0			Success
 */
static int
copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size)
{
	int error;

	if (ptr_size == 4) {
		/* 64 bit value containing 32 bit address */
		unsigned int i = CAST_DOWN_EXPLICIT(unsigned int, ua);	/* SAFE */

		error = copyout(&i, ptr, 4);
	} else {
		error = copyout(&ua, ptr, 8);
	}
	return (error);
}
/*
 * exec_copyout_strings
 *
 * Copy out the strings segment to user space. The strings segment is put
 * on a preinitialized stack frame.
 *
 * Parameters:	struct image_params *	the image parameter block
 *		int *			a pointer to the stack offset variable
 *
 * Returns:	0			Success
 *
 * Implicit returns:
 *		(*stackp)		The stack offset, modified
 *
 * Note:	The strings segment layout is backward, from the beginning
 *		of the top of the stack to consume the minimal amount of
 *		space possible; the returned stack pointer points to the
 *		end of the area consumed (stacks grow downward).
 *
 *		argc is an int; arg[i] are pointers; env[i] are pointers;
 *		the 0's are (void *)NULL's
 *
 * The stack frame layout is:
 *
 *	+-------------+ <- p->user_stack
 *	:             :
 * sp->	+-------------+
 *
 * Although technically a part of the STRING AREA, we treat the PATH AREA as
 * a separate entity. This allows us to align the beginning of the PATH AREA
 * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers
 * which precede it on the stack are properly aligned.
 */
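
/*
 * A small userland sketch (not compiled here) of the sizing arithmetic the
 * layout note above describes: the string area is padded up to pointer
 * alignment, one pointer slot is reserved per string plus three NULL
 * terminators (argv, envv, applev), and one pointer-sized slot holds argc.
 * The counts used below are arbitrary example values.
 */
#if 0	/* userland illustration only */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const uint64_t ptr_size = 8;		/* 8 for a 64-bit image, 4 for 32-bit */
	const uint64_t argc = 3, envc = 20, applec = 4;
	uint64_t string_bytes = 1234;		/* combined length of all strings, incl. NULs */

	/* pad the string area so the pointer arrays below it stay aligned */
	uint64_t string_area = (string_bytes + ptr_size - 1) & ~(ptr_size - 1);
	uint64_t ptr_area = (argc + envc + applec + 3) * ptr_size;
	uint64_t argc_area = ptr_size;

	printf("stack consumed: %llu bytes\n",
	    (unsigned long long)(string_area + ptr_area + argc_area));
	return 0;
}
#endif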
static int
exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp)
{
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4;
	void *ptr_buffer_start, *ptr_buffer;

	user_addr_t string_area;	/* *argv[], *env[] */
	user_addr_t ptr_area;		/* argv[], env[], applev[] */
	user_addr_t argc_area;		/* argc */

	struct copyout_desc {
		char		*start_string;
		int		count;
		user_addr_t	*dtrace_cookie;
		boolean_t	null_term;
	} descriptors[] = {
		{
			.start_string = imgp->ip_startargv,
			.count = imgp->ip_argc,
			.dtrace_cookie = &p->p_dtrace_argv,
		},
		{
			.start_string = imgp->ip_endargv,
			.count = imgp->ip_envc,
			.dtrace_cookie = &p->p_dtrace_envp,
		},
		{
			.start_string = imgp->ip_strings,
			.dtrace_cookie = NULL,
		},
		{
			.start_string = imgp->ip_endenvv,
			.count = imgp->ip_applec - 1,	/* exec_path handled above */
			.dtrace_cookie = NULL,
		}
	};

	/*
	 * All previous contributors to the string area
	 * should have aligned their sub-area
	 */
	if (imgp->ip_strspace % ptr_size != 0) {
		/* ... */
	}

	/* Grow the stack down for the strings we've been building up */
	string_size = imgp->ip_strendp - imgp->ip_strings;
	stack -= string_size;
	string_area = stack;

	/*
	 * Need room for one pointer for each string, plus
	 * one for the NULLs terminating the argv, envv, and apple areas.
	 */
	ptr_area_size = (imgp->ip_argc + imgp->ip_envc + imgp->ip_applec + 3) * ptr_size;
	stack -= ptr_area_size;
	ptr_area = stack;

	/* We'll construct all the pointer arrays in our string buffer,
	 * which we already know is aligned properly, and ip_argspace
	 * was used to verify we have enough space.
	 */
	ptr_buffer_start = ptr_buffer = (void *)imgp->ip_strendp;

	/*
	 * Need room for pointer-aligned argc slot.
	 */
	stack -= ptr_size;
	argc_area = stack;

	/*
	 * Record the size of the arguments area so that sysctl_procargs()
	 * can return the argument area without having to parse the arguments.
	 */
	p->p_argc = imgp->ip_argc;
	p->p_argslen = (int)(*stackp - string_area);

	/* Return the initial stack address: the location of argc */
	*stackp = argc_area;

	/*
	 * Copy out the entire strings area.
	 */
	error = copyout(imgp->ip_strings, string_area, string_size);

	for (i = 0; i < sizeof(descriptors)/sizeof(descriptors[0]); i++) {
		char *cur_string = descriptors[i].start_string;

#if CONFIG_DTRACE
		if (descriptors[i].dtrace_cookie) {
			*descriptors[i].dtrace_cookie = ptr_area + ((uintptr_t)ptr_buffer - (uintptr_t)ptr_buffer_start);	/* dtrace convenience */
		}
#endif /* CONFIG_DTRACE */

		/*
		 * For each segment (argv, envv, applev), copy as many pointers as requested
		 * to our pointer buffer.
		 */
		for (j = 0; j < descriptors[i].count; j++) {
			user_addr_t cur_address = string_area + (cur_string - imgp->ip_strings);

			/* Copy out the pointer to the current string. Alignment has been verified */
			if (ptr_size == 8) {
				*(uint64_t *)ptr_buffer = (uint64_t)cur_address;
			} else {
				*(uint32_t *)ptr_buffer = (uint32_t)cur_address;
			}

			ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
			cur_string += strlen(cur_string) + 1;	/* Only a NUL between strings in the same area */
		}

		if (descriptors[i].null_term) {
			if (ptr_size == 8) {
				*(uint64_t *)ptr_buffer = 0ULL;
			} else {
				*(uint32_t *)ptr_buffer = 0;
			}

			ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
		}
	}

	/*
	 * Copy out all our pointer arrays in bulk.
	 */
	error = copyout(ptr_buffer_start, ptr_area, ptr_area_size);

	/* argc (int32, stored in a ptr_size area) */
	error = copyoutptr((user_addr_t)imgp->ip_argc, argc_area, ptr_size);
/*
 * exec_extract_strings
 *
 * Copy arguments and environment from user space into work area; we may
 * have already copied some early arguments into the work area, and if
 * so, any arguments copied in are appended to those already there.
 * This function is the primary manipulator of ip_argspace, since
 * these are the arguments the client of execve(2) knows about. After
 * each argv[]/envv[] string is copied, we charge the string length
 * and argv[]/envv[] pointer slot to ip_argspace, so that we can
 * fully preflight the arg list size.
 *
 * Parameters:	struct image_params *	the image parameter block
 *
 * Returns:	0			Success
 *
 * Implicit returns:
 *		(imgp->ip_argc)		Count of arguments, updated
 *		(imgp->ip_envc)		Count of environment strings, updated
 *		(imgp->ip_argspace)	Count of NCARGS space remaining
 *		(imgp->ip_interp_buffer)	Interpreter and args (mutated in place)
 *
 * Note:	The argument and environment vectors are user space pointers
 *		to arrays of user space pointers.
 */
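
/*
 * Userland sketch (not compiled here) of the "charge each string plus its
 * pointer slot against a fixed budget" preflight described above, using this
 * process's own argv/envp in place of copyin()'d user pointers. ARG_MAX and
 * the 8-byte slot size are illustrative stand-ins for NCARGS/new_ptr_size.
 */
#if 0	/* userland illustration only */
#include <stdio.h>
#include <string.h>
#include <limits.h>

extern char **environ;

int
main(int argc, char *argv[])
{
	long budget = ARG_MAX;
	const long slot = 8;		/* pointer slot, as for a 64-bit image */
	int i;

	for (i = 0; i < argc; i++)
		budget -= (long)strlen(argv[i]) + 1 + slot;
	budget -= slot;			/* argv[] NULL terminator */

	for (i = 0; environ[i] != NULL; i++)
		budget -= (long)strlen(environ[i]) + 1 + slot;
	budget -= slot;			/* envv[] NULL terminator */

	printf("remaining budget: %ld bytes (%s)\n", budget,
	    budget < 0 ? "would fail with E2BIG" : "fits");
	return 0;
}
#endif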
static int
exec_extract_strings(struct image_params *imgp)
{
	int error = 0;
	int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT) ? 8 : 4;
	int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4;
	user_addr_t argv = imgp->ip_user_argv;
	user_addr_t envv = imgp->ip_user_envv;

	/*
	 * Adjust space reserved for the path name by however much padding it
	 * needs. Doing this here since we didn't know if this would be a 32-
	 * or 64-bit process back in exec_save_path.
	 */
	while (imgp->ip_strspace % new_ptr_size != 0) {
		*imgp->ip_strendp++ = '\0';
		imgp->ip_strspace--;
		/* imgp->ip_argspace--; not counted towards exec args total */
	}

	/*
	 * From now on, we start attributing string space to ip_argspace
	 */
	imgp->ip_startargv = imgp->ip_strendp;

	if ((imgp->ip_flags & IMGPF_INTERPRET) != 0) {
		char *argstart, *ch;

		/* First, the arguments in the "#!" string are tokenized and extracted. */
		argstart = imgp->ip_interp_buffer;
		/* ... */
		while (*ch && !IS_WHITESPACE(*ch)) {
			ch++;
		}

		/* last argument, no need to NUL-terminate */
		error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);

		error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);

		/*
		 * Find the next string. We know spaces at the end of the string have already
		 */
		while (IS_WHITESPACE(*argstart)) {
			argstart++;
		}

		/* Error-check, regardless of whether this is the last interpreter arg or not */
		if (imgp->ip_argspace < new_ptr_size) {
			/* ... */
		}
		imgp->ip_argspace -= new_ptr_size;	/* to hold argv[] entry */

		/*
		 * If we are running an interpreter, replace the av[0] that was
		 * passed to execve() with the path name that was
		 * passed to execve() for interpreters which do not use the PATH
		 * to locate their script arguments.
		 */
		error = copyinptr(argv, &arg, ptr_size);

		argv += ptr_size;	/* consume without using */

		if (imgp->ip_interp_sugid_fd != -1) {
			char temp[19];	/* "/dev/fd/" + 10 digits + NUL */
			snprintf(temp, sizeof(temp), "/dev/fd/%d", imgp->ip_interp_sugid_fd);
			error = exec_add_user_string(imgp, CAST_USER_ADDR_T(temp), UIO_SYSSPACE, TRUE);
		} else {
			error = exec_add_user_string(imgp, imgp->ip_user_fname, imgp->ip_seg, TRUE);
		}

		if (imgp->ip_argspace < new_ptr_size) {
			/* ... */
		}
		imgp->ip_argspace -= new_ptr_size;	/* to hold argv[] entry */
	}

	while (argv != 0LL) {
		error = copyinptr(argv, &arg, ptr_size);

		error = exec_add_user_string(imgp, arg, imgp->ip_seg, TRUE);

		if (imgp->ip_argspace < new_ptr_size) {
			/* ... */
		}
		imgp->ip_argspace -= new_ptr_size;	/* to hold argv[] entry */
	}

	/* Save space for argv[] NULL terminator */
	if (imgp->ip_argspace < new_ptr_size) {
		/* ... */
	}
	imgp->ip_argspace -= new_ptr_size;

	/* Note where the args end and env begins. */
	imgp->ip_endargv = imgp->ip_strendp;

	/* Now, get the environment */
	while (envv != 0LL) {
		error = copyinptr(envv, &env, ptr_size);

		error = exec_add_user_string(imgp, env, imgp->ip_seg, TRUE);

		if (imgp->ip_argspace < new_ptr_size) {
			/* ... */
		}
		imgp->ip_argspace -= new_ptr_size;	/* to hold envv[] entry */
	}

	/* Save space for envv[] NULL terminator */
	if (imgp->ip_argspace < new_ptr_size) {
		/* ... */
	}
	imgp->ip_argspace -= new_ptr_size;

	/* Align the tail of the combined argv+envv area */
	while (imgp->ip_strspace % new_ptr_size != 0) {
		if (imgp->ip_argspace < 1) {
			/* ... */
		}
		*imgp->ip_strendp++ = '\0';
		imgp->ip_strspace--;
		imgp->ip_argspace--;
	}

	/* Note where the envv ends and applev begins. */
	imgp->ip_endenvv = imgp->ip_strendp;

	/*
	 * From now on, we are no longer charging argument
	 * space to ip_argspace.
	 */
static void
random_hex_str(char *str, int len)
{
	uint64_t low, high, value;

	/* A 64-bit value will only take 16 characters, plus '0x' and NUL. */

	/* We need enough room for at least 1 digit */

	value = high << 32 | low;

	for (idx = 2; idx < len - 1; idx++) {
		digit = value & 0xf;
		if (digit < 10)
			str[idx] = '0' + digit;
		else
			str[idx] = 'a' + (digit - 10);
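
/*
 * Userland sketch (not compiled here) of what random_hex_str() produces: a
 * NUL-terminated "0x..." string of hex digits derived from random bits. Here
 * arc4random() supplies the entropy and snprintf() does the formatting that
 * the kernel routine performs by hand; both are assumptions of this sketch,
 * not part of the kernel code above.
 */
#if 0	/* userland illustration only */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static void
example_random_hex_str(char *str, size_t len)
{
	uint64_t value = ((uint64_t)arc4random() << 32) | arc4random();

	/* "0x" + up to 16 hex digits, truncated to the caller's buffer */
	snprintf(str, len, "0x%llx", (unsigned long long)value);
}

int
main(void)
{
	char buf[19];	/* "0x" + 16 digits + NUL */

	example_random_hex_str(buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}
#endif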
/*
 * Libc has an 8-element array set up for stack guard values. It only fills
 * in one of those entries, and both gcc and llvm seem to use only a single
 * 8-byte guard. Until somebody needs more than an 8-byte guard value, don't
 * do the work to construct them.
 */
#define	GUARD_VALUES 1
#define	GUARD_KEY "stack_guard="

/*
 * System malloc needs some entropy when it is initialized.
 */
#define	ENTROPY_VALUES 2
#define	ENTROPY_KEY "malloc_entropy="

#define	PFZ_KEY "pfz="
extern user32_addr_t commpage_text32_location;
extern user64_addr_t commpage_text64_location;
/*
 * Build up the contents of the apple[] string vector
 */
static int
exec_add_apple_strings(struct image_params *imgp)
{
	char guard_vec[strlen(GUARD_KEY) + 19 * GUARD_VALUES + 1];
	char entropy_vec[strlen(ENTROPY_KEY) + 19 * ENTROPY_VALUES + 1];
	char pfz_string[strlen(PFZ_KEY) + 16 + 4 + 1];

	if (imgp->ip_flags & IMGPF_IS_64BIT) {
		snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%llx", commpage_text64_location);
	} else {
		snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%x", commpage_text32_location);
	}

	/* exec_save_path stored the first string */
	imgp->ip_applec = 1;

	/* adding the pfz string */
	error = exec_add_user_string(imgp, CAST_USER_ADDR_T(pfz_string), UIO_SYSSPACE, FALSE);

	/*
	 * Supply libc with a collection of random values to use when
	 * implementing -fstack-protector.
	 */
	(void)strlcpy(guard_vec, GUARD_KEY, sizeof (guard_vec));
	for (i = 0; i < GUARD_VALUES; i++) {
		random_hex_str(guard, sizeof (guard));
		if (i)
			(void)strlcat(guard_vec, ",", sizeof (guard_vec));
		(void)strlcat(guard_vec, guard, sizeof (guard_vec));
	}

	error = exec_add_user_string(imgp, CAST_USER_ADDR_T(guard_vec), UIO_SYSSPACE, FALSE);

	/*
	 * Supply libc with entropy for system malloc.
	 */
	(void)strlcpy(entropy_vec, ENTROPY_KEY, sizeof(entropy_vec));
	for (i = 0; i < ENTROPY_VALUES; i++) {
		random_hex_str(entropy, sizeof (entropy));
		if (i)
			(void)strlcat(entropy_vec, ",", sizeof (entropy_vec));
		(void)strlcat(entropy_vec, entropy, sizeof (entropy_vec));
	}

	error = exec_add_user_string(imgp, CAST_USER_ADDR_T(entropy_vec), UIO_SYSSPACE, FALSE);

	/* Align the tail of the combined applev area */
	while (imgp->ip_strspace % new_ptr_size != 0) {
		*imgp->ip_strendp++ = '\0';
		imgp->ip_strspace--;
	}
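
/*
 * Userland sketch (not compiled here) of the apple[] entries assembled above,
 * e.g. "stack_guard=0x...", "malloc_entropy=0x...,0x..." and "pfz=0x...".
 * The numeric values below are arbitrary placeholders; a real process sees
 * the kernel-supplied vector immediately after envp[] on its stack (on OS X,
 * libc also exposes it as the optional fourth argument to main()).
 */
#if 0	/* userland illustration only */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	char guard_vec[64], entropy_vec[64], pfz_string[32];
	uint64_t guard = 0x243f6a8885a308d3ULL;		/* placeholder entropy */
	uint64_t e0 = 0x13198a2e03707344ULL, e1 = 0xa4093822299f31d0ULL;
	uint64_t commpage_text = 0x7fffffe00000ULL;	/* placeholder address */

	snprintf(guard_vec, sizeof(guard_vec), "stack_guard=0x%llx",
	    (unsigned long long)guard);
	snprintf(entropy_vec, sizeof(entropy_vec), "malloc_entropy=0x%llx,0x%llx",
	    (unsigned long long)e0, (unsigned long long)e1);
	snprintf(pfz_string, sizeof(pfz_string), "pfz=0x%llx",
	    (unsigned long long)commpage_text);

	printf("%s\n%s\n%s\n", guard_vec, entropy_vec, pfz_string);
	return 0;
}
#endif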
#define	unix_stack_size(p)	(p->p_rlimit[RLIMIT_STACK].rlim_cur)

/*
 * exec_check_permissions
 *
 * Description:	Verify that the file being executed is in fact allowed to be
 *		executed based on its POSIX file permissions and other
 *		access control criteria
 *
 * Parameters:	struct image_params *	the image parameter block
 *
 * Returns:	0			Success
 *		EACCES			Permission denied
 *		ENOEXEC			Executable file format error
 *		ETXTBSY			Text file busy [misuse of error code]
 *	vnode_authorize:???
 */
static int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->ip_vp;
	struct vnode_attr *vap = imgp->ip_vattr;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	int error;
	kauth_action_t action;

	/* Only allow execution of regular files */
	if (!vnode_isreg(vp)) {
		/* ... */
	}

	/* Get the file attributes that we will be using here and elsewhere */
	VATTR_WANTED(vap, va_uid);
	VATTR_WANTED(vap, va_gid);
	VATTR_WANTED(vap, va_mode);
	VATTR_WANTED(vap, va_fsid);
	VATTR_WANTED(vap, va_fileid);
	VATTR_WANTED(vap, va_data_size);
	if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0) {
		/* ... */
	}

	/*
	 * Ensure that at least one execute bit is on - otherwise root
	 * will always succeed, and we don't want that to happen unless the
	 * file really is executable.
	 */
	if (!vfs_authopaque(vnode_mount(vp)) && ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) {
		/* ... */
	}

	/* Disallow zero length files */
	if (vap->va_data_size == 0) {
		/* ... */
	}

	imgp->ip_arch_offset = (user_size_t)0;
	imgp->ip_arch_size = vap->va_data_size;

	/* Disable setuid-ness for traced programs or if MNT_NOSUID */
	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED))
		vap->va_mode &= ~(VSUID | VSGID);

	error = mac_vnode_check_exec(imgp->ip_vfs_context, vp, imgp);

	/* Check for execute permission */
	action = KAUTH_VNODE_EXECUTE;
	/* Traced images must also be readable */
	if (p->p_lflag & P_LTRACED)
		action |= KAUTH_VNODE_READ_DATA;
	if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0) {
		/* ... */
	}

	/* Don't let it run if anyone had it open for writing */
	if (vp->v_writecount) {
		panic("going to return ETXTBSY %x", vp);
	}

	/* XXX May want to indicate to underlying FS that vnode is open */
/*
 * Initially clear the P_SUGID in the process flags; if an SUGID process is
 * exec'ing a non-SUGID image, then this is the point of no return.
 *
 * If the image being activated is SUGID, then replace the credential with a
 * copy, disable tracing (unless the tracing process is root), reset the
 * mach task port to revoke it, set the P_SUGID bit,
 *
 * If the saved user and group ID will be changing, then make sure it happens
 * to a new credential, rather than a shared one.
 *
 * Set the security token (this is probably obsolete, given that the token
 * should not technically be separate from the credential itself).
 *
 * Parameters:	struct image_params *	the image parameter block
 *
 * Returns:	void			No failure indication
 *
 * Implicit returns:
 *		<process credential>	Potentially modified/replaced
 *		<task port>		Potentially revoked
 *		<process flags>		P_SUGID bit potentially modified
 *		<security token>	Potentially modified
 */
static int
exec_handle_sugid(struct image_params *imgp)
{
	kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	int leave_sugid_clear = 0;

	/*
	 * Determine whether a call to update the MAC label will result in the
	 * credential changing.
	 *
	 * Note:	MAC policies which do not actually end up modifying
	 *		the label subsequently are strongly encouraged to
	 *		return 0 for this check, since a non-zero answer will
	 *		slow down the exec fast path for normal binaries.
	 */
	mac_transition = mac_cred_check_label_update_execve(
							imgp->ip_vfs_context,
							imgp->ip_scriptlabelp,
							imgp->ip_execlabelp, p);

	OSBitAndAtomic(~((uint32_t)P_SUGID), &p->p_flag);

	/*
	 * Order of the following is important; group checks must go last,
	 * as we use the success of the 'ismember' check combined with the
	 * failure of the explicit match to indicate that we will be setting
	 * the egid of the process even though the new process did not
	 * require VSUID/VSGID bits in order for it to set the new group as
	 * its egid.
	 *
	 * Note:	Technically, by this we are implying a call to
	 *		setegid() in the new process, rather than implying
	 *		it used its VSGID bit to set the effective group,
	 *		even though there is no code in that process to make
	 *		such a call.
	 */
	if (((imgp->ip_origvattr->va_mode & VSUID) != 0 &&
	    kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) ||
	    ((imgp->ip_origvattr->va_mode & VSGID) != 0 &&
	    ((kauth_cred_ismember_gid(cred, imgp->ip_origvattr->va_gid, &leave_sugid_clear) || !leave_sugid_clear) ||
	    (kauth_cred_getgid(cred) != imgp->ip_origvattr->va_gid)))) {

		/* label for MAC transition and neither VSUID nor VSGID */
handle_mac_transition:

		/*
		 * Replace the credential with a copy of itself if euid or
		 *
		 * Note:	setuid binaries will automatically opt out of
		 *		group resolver participation as a side effect
		 *		of this operation.  This is an intentional
		 *		part of the security model, which requires a
		 *		participating credential be established by
		 *		escalating privilege, setting up all other
		 *		aspects of the credential including whether
		 *		or not to participate in external group
		 *		membership resolution, then dropping their
		 *		effective privilege to that of the desired
		 *		final credential state.
		 */
		if (imgp->ip_origvattr->va_mode & VSUID) {
			p->p_ucred = kauth_cred_setresuid(p->p_ucred, KAUTH_UID_NONE, imgp->ip_origvattr->va_uid, imgp->ip_origvattr->va_uid, KAUTH_UID_NONE);
			/* update cred on proc */
			PROC_UPDATE_CREDS_ONPROC(p);
		}
		if (imgp->ip_origvattr->va_mode & VSGID) {
			p->p_ucred = kauth_cred_setresgid(p->p_ucred, KAUTH_GID_NONE, imgp->ip_origvattr->va_gid, imgp->ip_origvattr->va_gid);
			/* update cred on proc */
			PROC_UPDATE_CREDS_ONPROC(p);
		}

		/*
		 * If a policy has indicated that it will transition the label,
		 * before making the call into the MAC policies, get a new
		 * duplicate credential, so they can modify it without
		 * modifying any others sharing it.
		 */
		if (mac_transition) {
			kauth_cred_t my_cred;
			if (kauth_proc_label_update_execve(p,
						imgp->ip_vfs_context,
						imgp->ip_scriptlabelp,
						imgp->ip_execlabelp)) {
				/*
				 * If updating the MAC label resulted in a
				 * disjoint credential, flag that we need to
				 * set the P_SUGID bit.  This protects
				 * against debuggers being attached by an
				 * insufficiently privileged process onto the
				 * result of a transition to a more privileged
				 * credential.
				 */
				leave_sugid_clear = 0;
			}

			my_cred = kauth_cred_proc_ref(p);
			mac_task_label_update_cred(my_cred, p->task);
			kauth_cred_unref(&my_cred);
		}
#endif	/* CONFIG_MACF */

		/*
		 * Have mach reset the task and thread ports.
		 * We don't want anyone who had the ports before
		 * a setuid exec to be able to access/control the
		 * task/thread after.
		 */
		ipc_task_reset(p->task);
		ipc_thread_reset((imgp->ip_new_thread != NULL) ?
				 imgp->ip_new_thread : current_thread());

		/*
		 * If 'leave_sugid_clear' is non-zero, then we passed the
		 * VSUID and MACF checks, and successfully determined that
		 * the previous cred was a member of the VSGID group, but
		 * that it was not the default at the time of the execve,
		 * and that the post-labelling credential was not disjoint.
		 * So we don't set the P_SUGID on the basis of simply
		 * running this code.
		 */
		if (!leave_sugid_clear)
			OSBitOrAtomic(P_SUGID, &p->p_flag);
		/*
		 * Radar 2261856; setuid security hole fix
		 * XXX For setuid processes, attempt to ensure that
		 * stdin, stdout, and stderr are already allocated.
		 * We do not want userland to accidentally allocate
		 * descriptors in this range which has implied meaning
		 */
		for (i = 0; i < 3; i++) {
			if (p->p_fd->fd_ofiles[i] != NULL)
				continue;

			/*
			 * Do the kernel equivalent of
			 *
			 *	(void) open("/dev/null", O_RDONLY);
			 */
			struct fileproc *fp;

			if ((error = falloc(p,
			    &fp, &indx, imgp->ip_vfs_context)) != 0) {
				/* ... */
			}

			struct nameidata nd1;

			NDINIT(&nd1, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
			    CAST_USER_ADDR_T("/dev/null"),
			    imgp->ip_vfs_context);

			if ((error = vn_open(&nd1, FREAD, 0)) != 0) {
				fp_free(p, indx, fp);
			}

			struct fileglob *fg = fp->f_fglob;

			fg->fg_flag = FREAD;
			fg->fg_type = DTYPE_VNODE;
			fg->fg_ops = &vnops;
			fg->fg_data = nd1.ni_vp;

			vnode_put(nd1.ni_vp);

			procfdtbl_releasefd(p, indx, NULL);
			fp_drop(p, indx, fp, 1);
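
			/*
			 * Userland sketch (not compiled here) of the same defensive
			 * idea for ordinary programs: make sure descriptors 0-2 exist
			 * before doing anything else, so later open()s cannot silently
			 * land on stdin/stdout/stderr.
			 */
#if 0	/* userland illustration only */
#include <fcntl.h>
#include <unistd.h>

static void
ensure_std_fds(void)
{
	int fd;

	/*
	 * open("/dev/null") returns the lowest free descriptor, so this
	 * loop fills exactly the missing slots among 0, 1 and 2.
	 */
	while ((fd = open("/dev/null", O_RDWR)) >= 0) {
		if (fd > STDERR_FILENO) {
			close(fd);
			break;
		}
	}
}
#endif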
		/*
		 * We are here because we were told that the MAC label will
		 * be transitioned, and the binary is not VSUID or VSGID; to
		 * deal with this case, we could either duplicate a lot of
		 * code, or we can indicate we want to default the P_SUGID
		 * bit clear and jump back up.
		 */
		if (mac_transition) {
			leave_sugid_clear = 1;
			goto handle_mac_transition;
		}
#endif	/* CONFIG_MACF */

	/*
	 * Implement the semantic where the effective user and group become
	 * the saved user and group in exec'ed programs.
	 */
	p->p_ucred = kauth_cred_setsvuidgid(p->p_ucred, kauth_cred_getuid(p->p_ucred), kauth_cred_getgid(p->p_ucred));
	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(p);

	/* Update the process' identity version and set the security token */
	set_security_token(p);
/*
 * Description:	Set the user stack address for the process to the provided
 *		address.  If a custom stack was not set as a result of the
 *		load process (i.e. as specified by the image file for the
 *		executable), then allocate the stack in the provided map and
 *		set up appropriate guard pages for enforcing administrative
 *		limits on stack growth, if they end up being needed.
 *
 * Parameters:	p			Process to set stack on
 *		load_result		Information from mach-o load commands
 *		map			Address map in which to allocate the new stack
 *
 * Returns:	KERN_SUCCESS		Stack successfully created
 *		!KERN_SUCCESS		Mach failure code
 */
static kern_return_t
create_unix_stack(vm_map_t map, load_result_t* load_result,
		  proc_t p)
{
	mach_vm_size_t size, prot_size;
	mach_vm_offset_t addr, prot_addr;

	mach_vm_address_t user_stack = load_result->user_stack;

	p->user_stack = user_stack;

	if (!load_result->prog_allocated_stack) {
		/*
		 * Allocate enough space for the maximum stack size we
		 * will ever authorize and an extra page to act as
		 * a guard page for stack overflows. For default stacks,
		 * vm_initial_limit_stack takes care of the extra guard page.
		 * Otherwise we must allocate it ourselves.
		 */

		size = mach_vm_round_page(load_result->user_stack_size);
		if (load_result->prog_stack_size) {
			/* ... */
		}
		addr = mach_vm_trunc_page(load_result->user_stack - size);
		kr = mach_vm_allocate(map, &addr, size,
				      VM_MAKE_TAG(VM_MEMORY_STACK) | /* ... */);
		if (kr != KERN_SUCCESS) {
			/* If can't allocate at default location, try anywhere */
			kr = mach_vm_allocate(map, &addr, size,
					      VM_MAKE_TAG(VM_MEMORY_STACK) | /* ... */);
			if (kr != KERN_SUCCESS) {
				/* ... */
			}

			user_stack = addr + size;
			load_result->user_stack = user_stack;

			p->user_stack = user_stack;
		}

		/*
		 * And prevent access to what's above the current stack
		 * size limit for this process.
		 */
		if (load_result->prog_stack_size)
			prot_size = PAGE_SIZE;
		else
			prot_size = mach_vm_trunc_page(size - unix_stack_size(p));
		kr = mach_vm_protect(map,
				     /* ... */);
		if (kr != KERN_SUCCESS) {
			(void) mach_vm_deallocate(map, addr, size);
		}
	}

	return KERN_SUCCESS;
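
/*
 * Userland sketch (not compiled here) of the guard arithmetic above: reserve
 * the rounded maximum stack size and make everything below the current
 * RLIMIT_STACK soft limit inaccessible, so growth past the limit faults.
 * mmap()/mprotect() stand in for the Mach VM calls used by the kernel.
 */
#if 0	/* userland illustration only */
#include <sys/mman.h>
#include <sys/resource.h>
#include <stddef.h>
#include <unistd.h>

static void *
reserve_stack_with_guard(size_t max_size)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	struct rlimit rl;
	size_t size = (max_size + page - 1) & ~(page - 1);
	size_t prot_size = 0;

	getrlimit(RLIMIT_STACK, &rl);

	/* reserve the whole region; the stack will grow down from base + size */
	char *base = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	/* deny access below the current limit, like mach_vm_protect(VM_PROT_NONE) */
	if ((size_t)rl.rlim_cur < size)
		prot_size = (size - (size_t)rl.rlim_cur) & ~(page - 1);
	if (prot_size > 0)
		mprotect(base, prot_size, PROT_NONE);

	return base + size;	/* top of stack */
}
#endif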
#include <sys/reboot.h>

static char		init_program_name[128] = "/sbin/launchd";

struct execve_args	init_exec_args;

/*
 * load_init_program
 *
 * Description:	Load the "init" program; in most cases, this will be "launchd"
 *
 * Parameters:	p			Process to call execve() to create
 *					the "init" program
 *
 * Notes:	The process that is passed in is the first manufactured
 *		process on the system, and gets here via bsd_ast() firing
 *		for the first time.  This is done to ensure that bsd_init()
 *		has run to completion.
 */
void
load_init_program(proc_t p)
{
	vm_offset_t	init_addr;

	/*
	 * Copy out program name.
	 */
	init_addr = VM_MIN_ADDRESS;
	(void) vm_allocate(current_map(), &init_addr, PAGE_SIZE, /* ... */);

	(void) copyout((caddr_t) init_program_name, CAST_USER_ADDR_T(init_addr),
			(unsigned) sizeof(init_program_name));

	argv[argc++] = (uint32_t)init_addr;
	init_addr += sizeof(init_program_name);
	init_addr = (vm_offset_t)ROUND_PTR(char, init_addr);

	/*
	 * Put out first (and only) argument, similarly.
	 * Assumes everything fits in a page as allocated above.
	 */
	if (boothowto & RB_SINGLE) {
		const char *init_args = "-s";

		copyout(init_args, CAST_USER_ADDR_T(init_addr), /* ... */);

		argv[argc++] = (uint32_t)init_addr;
		init_addr += strlen(init_args);
		init_addr = (vm_offset_t)ROUND_PTR(char, init_addr);
	}

	/*
	 * Null-end the argument list
	 */

	/*
	 * Copy out the argument list.
	 */
	(void) copyout((caddr_t) argv, CAST_USER_ADDR_T(init_addr),
			(unsigned) sizeof(argv));

	/*
	 * Set up argument block for fake call to execve.
	 */
	init_exec_args.fname = CAST_USER_ADDR_T(argv[0]);
	init_exec_args.argp = CAST_USER_ADDR_T((char **)init_addr);
	init_exec_args.envp = CAST_USER_ADDR_T(0);

	/*
	 * So that mach_init task is set with uid,gid 0 token
	 */
	set_security_token(p);

	error = execve(p, &init_exec_args, retval);
	if (error)
		panic("Process 1 exec of %s failed, errno %d",
		      init_program_name, error);
}
/*
 * load_return_to_errno
 *
 * Description:	Convert a load_return_t (Mach error) to an errno (BSD error)
 *
 * Parameters:	lrtn			Mach error number
 *
 * Returns:	(int)			BSD error number
 *		EBADARCH		Bad architecture
 *		EBADMACHO		Bad Mach object file
 *		ESHLIBVERS		Bad shared library version
 *		ENOMEM			Out of memory/resource shortage
 *		EACCES			Access denied
 *		ENOENT			Entry not found (usually "file does
 *					not exist")
 *		EIO			An I/O error occurred
 *		EBADEXEC		The executable is corrupt/unknown
 */
static int
load_return_to_errno(load_return_t lrtn)
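
/*
 * A sketch (not compiled here) of the shape of mapping the comment above
 * describes. The example_load_return_t names are illustrative placeholders,
 * not the kernel's actual load_return_t constants; only the errno values
 * come from the documentation above.
 */
#if 0	/* illustration only */
#include <errno.h>

typedef enum {
	EXAMPLE_LOAD_SUCCESS,
	EXAMPLE_LOAD_BADARCH,
	EXAMPLE_LOAD_BADMACHO,
	EXAMPLE_LOAD_SHLIB,
	EXAMPLE_LOAD_NOSPACE,
	EXAMPLE_LOAD_PROTECT,
	EXAMPLE_LOAD_ENOENT,
	EXAMPLE_LOAD_IOERROR,
	EXAMPLE_LOAD_FAILURE
} example_load_return_t;

static int
example_load_return_to_errno(example_load_return_t lrtn)
{
	switch (lrtn) {
	case EXAMPLE_LOAD_SUCCESS:	return 0;
	case EXAMPLE_LOAD_BADARCH:	return EBADARCH;
	case EXAMPLE_LOAD_BADMACHO:	return EBADMACHO;
	case EXAMPLE_LOAD_SHLIB:	return ESHLIBVERS;
	case EXAMPLE_LOAD_NOSPACE:	return ENOMEM;
	case EXAMPLE_LOAD_PROTECT:	return EACCES;
	case EXAMPLE_LOAD_ENOENT:	return ENOENT;
	case EXAMPLE_LOAD_IOERROR:	return EIO;
	case EXAMPLE_LOAD_FAILURE:
	default:			return EBADEXEC;
	}
}
#endif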
#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <kern/clock.h>
#include <mach/kern_return.h>

/*
 * Description:	Allocate the block of memory used by the execve arguments.
 *		At the same time, we allocate a page so that we can read in
 *		the first page of the image.
 *
 * Parameters:	struct image_params *	the image parameter block
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *		EACCES			Permission denied
 *		EINTR			Interrupted function
 *		ENOMEM			Not enough space
 *
 * Notes:	This is a temporary allocation into the kernel address space
 *		to enable us to copy arguments in from user space.  This is
 *		necessitated by not mapping the process calling execve() into
 *		the kernel address space during the execve() system call.
 *
 *		We assemble the argument and environment, etc., into this
 *		region before copying it as a single block into the child
 *		process address space (at the top or bottom of the stack,
 *		depending on which way the stack grows; see the function
 *		exec_copyout_strings() for details).
 *
 *		This ends up with a second (possibly unnecessary) copy compared
 *		with assembling the data directly into the child address space,
 *		but since we cannot be guaranteed that the parent has
 *		not modified its environment, we can't really know that it's
 *		really a block there as well.
 */

static int execargs_waiters = 0;
lck_mtx_t *execargs_cache_lock;

static void
execargs_lock_lock(void) {
	lck_mtx_lock_spin(execargs_cache_lock);
}

static void
execargs_lock_unlock(void) {
	lck_mtx_unlock(execargs_cache_lock);
}

static void
execargs_lock_sleep(void) {
	lck_mtx_sleep(execargs_cache_lock, LCK_SLEEP_DEFAULT, &execargs_free_count, THREAD_UNINT);
}

static kern_return_t
execargs_purgeable_allocate(char **execarg_address) {
	kern_return_t kr = vm_allocate(bsd_pageable_map, (vm_offset_t *)execarg_address, BSD_PAGEABLE_SIZE_PER_EXEC, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	assert(kr == KERN_SUCCESS);
	return kr;
}

static kern_return_t
execargs_purgeable_reference(void *execarg_address) {
	int state = VM_PURGABLE_NONVOLATILE;
	kern_return_t kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);

	assert(kr == KERN_SUCCESS);
	return kr;
}

static kern_return_t
execargs_purgeable_volatilize(void *execarg_address) {
	int state = VM_PURGABLE_VOLATILE | VM_PURGABLE_ORDERING_OBSOLETE;
	kern_return_t kr;
	kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);

	assert(kr == KERN_SUCCESS);
	return kr;
}
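
/*
 * Userland sketch (not compiled here) of the purgeable-memory protocol used
 * above: the buffer is allocated purgeable, pinned NONVOLATILE while an exec
 * is using it, and flipped VOLATILE when it goes back on the cache so the VM
 * system may reclaim it under pressure. Error handling is omitted, and the
 * task's own map is used in place of bsd_pageable_map.
 */
#if 0	/* userland illustration only */
#include <mach/mach.h>
#include <mach/vm_statistics.h>
#include <mach/vm_purgable.h>

int
main(void)
{
	vm_address_t buf = 0;
	int state;

	/* allocate a purgeable region */
	vm_allocate(mach_task_self(), &buf, 256 * 1024,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);

	/* "reference": pin the contents while in use */
	state = VM_PURGABLE_NONVOLATILE;
	vm_purgable_control(mach_task_self(), buf, VM_PURGABLE_SET_STATE, &state);

	/* ... use the buffer ... */

	/* "volatilize": allow the pager to discard it while cached */
	state = VM_PURGABLE_VOLATILE;
	vm_purgable_control(mach_task_self(), buf, VM_PURGABLE_SET_STATE, &state);

	vm_deallocate(mach_task_self(), buf, 256 * 1024);
	return 0;
}
#endif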
static void
execargs_wakeup_waiters(void) {
	thread_wakeup(&execargs_free_count);
}

static int
execargs_alloc(struct image_params *imgp)
{
	kern_return_t kret;
	int i, cache_index = -1;

	execargs_lock_lock();

	while (execargs_free_count == 0) {
		execargs_waiters++;
		execargs_lock_sleep();
		execargs_waiters--;
	}

	execargs_free_count--;

	for (i = 0; i < execargs_cache_size; i++) {
		vm_offset_t element = execargs_cache[i];
		if (element) {
			cache_index = i;
			imgp->ip_strings = (char *)(execargs_cache[i]);
			execargs_cache[i] = 0;
			break;
		}
	}

	assert(execargs_free_count >= 0);

	execargs_lock_unlock();

	if (cache_index == -1) {
		kret = execargs_purgeable_allocate(&imgp->ip_strings);
	} else {
		kret = execargs_purgeable_reference(imgp->ip_strings);
	}

	assert(kret == KERN_SUCCESS);
	if (kret != KERN_SUCCESS) {
		/* ... */
	}

	/* last page used to read in file headers */
	imgp->ip_vdata = imgp->ip_strings + ( NCARGS + PAGE_SIZE );
	imgp->ip_strendp = imgp->ip_strings;
	imgp->ip_argspace = NCARGS;
	imgp->ip_strspace = ( NCARGS + PAGE_SIZE );
/*
 * Description:	Free the block of memory used by the execve arguments and the
 *		first page of the executable by a previous call to the function
 *		execargs_alloc()
 *
 * Parameters:	struct image_params *	the image parameter block
 *
 * Returns:	0	Success
 *		EINVAL	Invalid argument
 *		EINTR	Operation interrupted
 */
static int
execargs_free(struct image_params *imgp)
{
	kern_return_t kret;
	int i;
	boolean_t needs_wakeup = FALSE;

	kret = execargs_purgeable_volatilize(imgp->ip_strings);

	execargs_lock_lock();
	execargs_free_count++;

	for (i = 0; i < execargs_cache_size; i++) {
		vm_offset_t element = execargs_cache[i];
		if (element == 0) {
			execargs_cache[i] = (vm_offset_t) imgp->ip_strings;
			imgp->ip_strings = NULL;
			break;
		}
	}

	assert(imgp->ip_strings == NULL);

	if (execargs_waiters > 0)
		needs_wakeup = TRUE;

	execargs_lock_unlock();

	if (needs_wakeup == TRUE)
		execargs_wakeup_waiters();

	return ((kret == KERN_SUCCESS ? 0 : EINVAL));
}
static void
exec_resettextvp(proc_t p, struct image_params *imgp)
{
	vnode_t tvp = p->p_textvp;

	offset = imgp->ip_arch_offset;

	if (vp == NULLVP)
		panic("exec_resettextvp: expected valid vp");

	ret = vnode_ref(vp);

	p->p_textoff = offset;

	p->p_textvp = NULLVP;	/* this is paranoia */

	if (tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
static int
check_for_signature(proc_t p, struct image_params *imgp)
{
	mach_port_t port = NULL;
	kern_return_t kr = KERN_FAILURE;

	unsigned char hash[SHA1_RESULTLEN];

	/*
	 * Override inherited code signing flags with the
	 * ones for the process that is being successfully
	 */
	p->p_csflags = imgp->ip_csflags;

	/* Set the switch_protect flag on the map */
	if (p->p_csflags & (CS_HARD | CS_KILL)) {
		vm_map_switch_protect(get_task_map(p->task), TRUE);
	}

	/* If the process is not signed or if it contains
	 * entitlements, we need to communicate through the
	 * task_access_port to taskgated.  taskgated will provide a
	 * detached code signature if present, and will enforce any
	 * restrictions on entitlements.  taskgated returns
	 * KERN_SUCCESS if it has completed its work and the exec
	 * should continue, or KERN_FAILURE if the exec should fail.
	 */
	error = cs_entitlements_blob_get(p, &blob, &length);

	/* if signed and no entitlements, then we're done here */
	if ((p->p_csflags & CS_VALID) && NULL == blob) {
		/* ... */
	}

	kr = task_get_task_access_port(p->task, &port);
	if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) {
#if !CONFIG_EMBEDDED
		/* fatal on the desktop when entitlements are present */
#endif
	}

	kr = find_code_signature(port, p->p_pid);
	if (KERN_SUCCESS != kr) {
		/* ... */
	}

	/* Only do this if exec_resettextvp() did not fail */
	if (p->p_textvp != NULLVP) {
		/*
		 * If there's a new code directory, mark this process
		 */
		if (0 == ubc_cs_getcdhash(p->p_textvp, p->p_textoff, hash)) {
			p->p_csflags |= CS_VALID;
		}
	}

	/* make very sure execution fails */
	psignal(p, SIGKILL);
/*
 * Typically as soon as we start executing this process, the
 * first instruction will trigger a VM fault to bring the text
 * pages (as executable) into the address space, followed soon
 * thereafter by dyld data structures (for dynamic executable).
 * To optimize this, as well as improve support for hardware
 * debuggers that can only access resident pages present
 * in the process' page tables, we prefault some pages if
 * possible. Errors are non-fatal.
 */
static void exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result)
{
	size_t expected_all_image_infos_size;

	/*
	 * Prefault executable or dyld entry point.
	 */
	vm_fault( current_map(),
		  vm_map_trunc_page(load_result->entry_point),
		  VM_PROT_READ | VM_PROT_EXECUTE,
		  /* ... */
		  THREAD_UNINT, NULL, 0);

	if (imgp->ip_flags & IMGPF_IS_64BIT) {
		expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos);
	} else {
		expected_all_image_infos_size = sizeof(struct user32_dyld_all_image_infos);
	}

	/* Decode dyld anchor structure from <mach-o/dyld_images.h> */
	if (load_result->dynlinker &&
	    load_result->all_image_info_addr &&
	    load_result->all_image_info_size >= expected_all_image_infos_size) {
		union {
			struct user64_dyld_all_image_infos	infos64;
			struct user32_dyld_all_image_infos	infos32;
		} all_image_infos;

		/*
		 * Pre-fault to avoid copyin() going through the trap handler
		 * and recovery path.
		 */
		vm_fault( current_map(),
			  vm_map_trunc_page(load_result->all_image_info_addr),
			  VM_PROT_READ | VM_PROT_WRITE,
			  /* ... */
			  THREAD_UNINT, NULL, 0);
		if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) {
			/* all_image_infos straddles a page */
			vm_fault( current_map(),
				  vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1),
				  VM_PROT_READ | VM_PROT_WRITE,
				  /* ... */
				  THREAD_UNINT, NULL, 0);
		}

		ret = copyin(load_result->all_image_info_addr,
			     &all_image_infos,
			     expected_all_image_infos_size);
		if (ret == 0 && all_image_infos.infos32.version >= 9) {

			user_addr_t notification_address;
			user_addr_t dyld_image_address;
			user_addr_t dyld_version_address;
			user_addr_t dyld_all_image_infos_address;
			user_addr_t dyld_slide_amount;

			if (imgp->ip_flags & IMGPF_IS_64BIT) {
				notification_address = all_image_infos.infos64.notification;
				dyld_image_address = all_image_infos.infos64.dyldImageLoadAddress;
				dyld_version_address = all_image_infos.infos64.dyldVersion;
				dyld_all_image_infos_address = all_image_infos.infos64.dyldAllImageInfosAddress;
			} else {
				notification_address = all_image_infos.infos32.notification;
				dyld_image_address = all_image_infos.infos32.dyldImageLoadAddress;
				dyld_version_address = all_image_infos.infos32.dyldVersion;
				dyld_all_image_infos_address = all_image_infos.infos32.dyldAllImageInfosAddress;
			}

			/*
			 * dyld statically sets up the all_image_infos in its Mach-O
			 * binary at static link time, with pointers relative to its default
			 * load address. Since ASLR might slide dyld before its first
			 * instruction is executed, "dyld_slide_amount" tells us how far
			 * dyld was loaded compared to its default expected load address.
			 * All other pointers into dyld's image should be adjusted by this
			 * amount. At some point later, dyld will fix up pointers to take
			 * into account the slide, at which point the all_image_infos_address
			 * field in the structure will match the runtime load address, and
			 * "dyld_slide_amount" will be 0, if we were to consult it again.
			 */
			dyld_slide_amount = load_result->all_image_info_addr - dyld_all_image_infos_address;

			kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				(uint64_t)load_result->all_image_info_addr,
				all_image_infos.infos32.version,
				(uint64_t)notification_address,
				(uint64_t)dyld_image_address,
				(uint64_t)dyld_version_address,
				(uint64_t)dyld_all_image_infos_address);

			vm_fault( current_map(),
				  vm_map_trunc_page(notification_address + dyld_slide_amount),
				  VM_PROT_READ | VM_PROT_EXECUTE,
				  /* ... */
				  THREAD_UNINT, NULL, 0);
			vm_fault( current_map(),
				  vm_map_trunc_page(dyld_image_address + dyld_slide_amount),
				  VM_PROT_READ | VM_PROT_EXECUTE,
				  /* ... */
				  THREAD_UNINT, NULL, 0);
			vm_fault( current_map(),
				  vm_map_trunc_page(dyld_version_address + dyld_slide_amount),
				  /* ... */
				  THREAD_UNINT, NULL, 0);
			vm_fault( current_map(),
				  vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount),
				  VM_PROT_READ | VM_PROT_WRITE,
				  /* ... */
				  THREAD_UNINT, NULL, 0);