/*
 * Source: apple/xnu (xnu-4570.31.3) — bsd/kern/kern_exec.c
 */
1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Mach Operating System
31 * Copyright (c) 1987 Carnegie-Mellon University
32 * All rights reserved. The CMU software License Agreement specifies
33 * the terms and conditions for use and redistribution.
34 */
35
36 /*-
37 * Copyright (c) 1982, 1986, 1991, 1993
38 * The Regents of the University of California. All rights reserved.
39 * (c) UNIX System Laboratories, Inc.
40 * All or some portions of this file are derived from material licensed
41 * to the University of California by American Telephone and Telegraph
42 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
43 * the permission of UNIX System Laboratories, Inc.
44 *
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
47 * are met:
48 * 1. Redistributions of source code must retain the above copyright
49 * notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 * 3. All advertising materials mentioning features or use of this software
54 * must display the following acknowledgement:
55 * This product includes software developed by the University of
56 * California, Berkeley and its contributors.
57 * 4. Neither the name of the University nor the names of its contributors
58 * may be used to endorse or promote products derived from this software
59 * without specific prior written permission.
60 *
61 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * SUCH DAMAGE.
72 *
73 * from: @(#)kern_exec.c 8.1 (Berkeley) 6/10/93
74 */
75 /*
76 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
77 * support for mandatory and extensible security protections. This notice
78 * is included in support of clause 2.2 (b) of the Apple Public License,
79 * Version 2.0.
80 */
81 #include <machine/reg.h>
82 #include <machine/cpu_capabilities.h>
83
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/filedesc.h>
87 #include <sys/kernel.h>
88 #include <sys/proc_internal.h>
89 #include <sys/kauth.h>
90 #include <sys/user.h>
91 #include <sys/socketvar.h>
92 #include <sys/malloc.h>
93 #include <sys/namei.h>
94 #include <sys/mount_internal.h>
95 #include <sys/vnode_internal.h>
96 #include <sys/file_internal.h>
97 #include <sys/stat.h>
98 #include <sys/uio_internal.h>
99 #include <sys/acct.h>
100 #include <sys/exec.h>
101 #include <sys/kdebug.h>
102 #include <sys/signal.h>
103 #include <sys/aio_kern.h>
104 #include <sys/sysproto.h>
105 #include <sys/persona.h>
106 #include <sys/reason.h>
107 #if SYSV_SHM
108 #include <sys/shm_internal.h> /* shmexec() */
109 #endif
110 #include <sys/ubc_internal.h> /* ubc_map() */
111 #include <sys/spawn.h>
112 #include <sys/spawn_internal.h>
113 #include <sys/process_policy.h>
114 #include <sys/codesign.h>
115 #include <sys/random.h>
116 #include <crypto/sha1.h>
117
118 #include <libkern/libkern.h>
119
120 #include <security/audit/audit.h>
121
122 #include <ipc/ipc_types.h>
123
124 #include <mach/mach_types.h>
125 #include <mach/port.h>
126 #include <mach/task.h>
127 #include <mach/task_access.h>
128 #include <mach/thread_act.h>
129 #include <mach/vm_map.h>
130 #include <mach/mach_vm.h>
131 #include <mach/vm_param.h>
132
133 #include <kern/sched_prim.h> /* thread_wakeup() */
134 #include <kern/affinity.h>
135 #include <kern/assert.h>
136 #include <kern/task.h>
137 #include <kern/coalition.h>
138 #include <kern/policy_internal.h>
139 #include <kern/kalloc.h>
140
141 #include <os/log.h>
142
143 #if CONFIG_MACF
144 #include <security/mac_framework.h>
145 #include <security/mac_mach_internal.h>
146 #endif
147
148 #include <vm/vm_map.h>
149 #include <vm/vm_kern.h>
150 #include <vm/vm_protos.h>
151 #include <vm/vm_kern.h>
152 #include <vm/vm_fault.h>
153 #include <vm/vm_pageout.h>
154
155 #include <kdp/kdp_dyld.h>
156
157 #include <machine/pal_routines.h>
158
159 #include <pexpert/pexpert.h>
160
161 #if CONFIG_MEMORYSTATUS
162 #include <sys/kern_memorystatus.h>
163 #endif
164
165 #if CONFIG_DTRACE
166 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
167 extern void dtrace_proc_exec(proc_t);
168 extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
169
170 /*
171 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
172 * we will store its value before actually calling it.
173 */
174 static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;
175
176 #include <sys/dtrace_ptss.h>
177 #endif
178
179 /* support for child creation in exec after vfork */
180 thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalition, proc_t child_proc, int inherit_memory, int is64bit, int in_exec);
181 void vfork_exit(proc_t p, int rv);
182 extern void proc_apply_task_networkbg_internal(proc_t, thread_t);
183 extern void task_set_did_exec_flag(task_t task);
184 extern void task_clear_exec_copy_flag(task_t task);
185 proc_t proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread);
186 boolean_t task_is_active(task_t);
187 boolean_t thread_is_active(thread_t thread);
188 void thread_copy_resource_info(thread_t dst_thread, thread_t src_thread);
189 void *ipc_importance_exec_switch_task(task_t old_task, task_t new_task);
190 extern void ipc_importance_release(void *elem);
191
192 /*
193 * Mach things for which prototypes are unavailable from Mach headers
194 */
195 void ipc_task_reset(
196 task_t task);
197 void ipc_thread_reset(
198 thread_t thread);
199 kern_return_t ipc_object_copyin(
200 ipc_space_t space,
201 mach_port_name_t name,
202 mach_msg_type_name_t msgt_name,
203 ipc_object_t *objectp);
204 void ipc_port_release_send(ipc_port_t);
205
206 #if DEVELOPMENT || DEBUG
207 void task_importance_update_owner_info(task_t);
208 #endif
209
210 extern struct savearea *get_user_regs(thread_t);
211
212 __attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid);
213
214 #include <kern/thread.h>
215 #include <kern/task.h>
216 #include <kern/ast.h>
217 #include <kern/mach_loader.h>
218 #include <kern/mach_fat.h>
219 #include <mach-o/fat.h>
220 #include <mach-o/loader.h>
221 #include <machine/vmparam.h>
222 #include <sys/imgact.h>
223
224 #include <sys/sdt.h>
225
226
227 /*
228 * EAI_ITERLIMIT The maximum number of times to iterate an image
229 * activator in exec_activate_image() before treating
230 * it as malformed/corrupt.
231 */
232 #define EAI_ITERLIMIT 3
233
234 /*
235 * For #! interpreter parsing
236 */
237 #define IS_WHITESPACE(ch) ((ch == ' ') || (ch == '\t'))
238 #define IS_EOL(ch) ((ch == '#') || (ch == '\n'))
239
240 extern vm_map_t bsd_pageable_map;
241 extern const struct fileops vnops;
242
243 #define USER_ADDR_ALIGN(addr, val) \
244 ( ( (user_addr_t)(addr) + (val) - 1) \
245 & ~((val) - 1) )
246
247 /* Platform Code Exec Logging */
248 static int platform_exec_logging = 0;
249
250 SYSCTL_DECL(_security_mac);
251
252 SYSCTL_INT(_security_mac, OID_AUTO, platform_exec_logging, CTLFLAG_RW, &platform_exec_logging, 0,
253 "log cdhashes for all platform binary executions");
254
255 static os_log_t peLog = OS_LOG_DEFAULT;
256
257 struct image_params; /* Forward */
258 static int exec_activate_image(struct image_params *imgp);
259 static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp);
260 static int load_return_to_errno(load_return_t lrtn);
261 static int execargs_alloc(struct image_params *imgp);
262 static int execargs_free(struct image_params *imgp);
263 static int exec_check_permissions(struct image_params *imgp);
264 static int exec_extract_strings(struct image_params *imgp);
265 static int exec_add_apple_strings(struct image_params *imgp, const load_result_t *load_result);
266 static int exec_handle_sugid(struct image_params *imgp);
267 static int sugid_scripts = 0;
268 SYSCTL_INT (_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW | CTLFLAG_LOCKED, &sugid_scripts, 0, "");
269 static kern_return_t create_unix_stack(vm_map_t map, load_result_t* load_result, proc_t p);
270 static int copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size);
271 static void exec_resettextvp(proc_t, struct image_params *);
272 static int check_for_signature(proc_t, struct image_params *);
273 static void exec_prefault_data(proc_t, struct image_params *, load_result_t *);
274 static errno_t exec_handle_port_actions(struct image_params *imgp, boolean_t * portwatch_present, ipc_port_t * portwatch_ports);
275 static errno_t exec_handle_spawnattr_policy(proc_t p, int psa_apptype, uint64_t psa_qos_clamp, uint64_t psa_darwin_role,
276 ipc_port_t * portwatch_ports, int portwatch_count);
277
278 /*
279 * exec_add_user_string
280 *
281 * Add the requested string to the string space area.
282 *
283 * Parameters; struct image_params * image parameter block
284 * user_addr_t string to add to strings area
285 * int segment from which string comes
286 * boolean_t TRUE if string contributes to NCARGS
287 *
288 * Returns: 0 Success
289 * !0 Failure errno from copyinstr()
290 *
291 * Implicit returns:
292 * (imgp->ip_strendp) updated location of next add, if any
293 * (imgp->ip_strspace) updated byte count of space remaining
294 * (imgp->ip_argspace) updated byte count of space in NCARGS
295 */
static int
exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolean_t is_ncargs)
{
	int error = 0;

	do {
		size_t len = 0;
		int space;

		/*
		 * Strings that count against NCARGS are bounded by the
		 * argument-space accounting, which is by construction no
		 * larger than the raw string area remaining.
		 */
		if (is_ncargs)
			space = imgp->ip_argspace; /* by definition smaller than ip_strspace */
		else
			space = imgp->ip_strspace;

		if (space <= 0) {
			error = E2BIG;
			break;
		}

		/* Kernel-resident source strings are copied directly; user
		 * addresses go through copyinstr(). */
		if (!UIO_SEG_IS_USER_SPACE(seg)) {
			char *kstr = CAST_DOWN(char *,str);	/* SAFE */
			error = copystr(kstr, imgp->ip_strendp, space, &len);
		} else {
			error = copyinstr(str, imgp->ip_strendp, space, &len);
		}

		/*
		 * Advance the cursor and shrink the accounting by whatever
		 * was copied (assumes copy[in]str reports the partial length
		 * in len even on ENAMETOOLONG — TODO confirm), so a retried
		 * over-long string eventually exhausts space and the loop
		 * terminates with E2BIG above.
		 */
		imgp->ip_strendp += len;
		imgp->ip_strspace -= len;
		if (is_ncargs)
			imgp->ip_argspace -= len;

	} while (error == ENAMETOOLONG);

	return error;
}
331
332 /*
333 * dyld is now passed the executable path as a getenv-like variable
334 * in the same fashion as the stack_guard and malloc_entropy keys.
335 */
336 #define EXECUTABLE_KEY "executable_path="
337
338 /*
339 * exec_save_path
340 *
341 * To support new app package launching for Mac OS X, the dyld needs the
342 * first argument to execve() stored on the user stack.
343 *
344 * Save the executable path name at the bottom of the strings area and set
345 * the argument vector pointer to the location following that to indicate
346 * the start of the argument and environment tuples, setting the remaining
347 * string space count to the size of the string area minus the path length.
348 *
349 * Parameters; struct image_params * image parameter block
350 * char * path used to invoke program
351 * int segment from which path comes
352 *
353 * Returns: int 0 Success
354 * EFAULT Bad address
355 * copy[in]str:EFAULT Bad address
356 * copy[in]str:ENAMETOOLONG Filename too long
357 *
358 * Implicit returns:
359 * (imgp->ip_strings) saved path
360 * (imgp->ip_strspace) space remaining in ip_strings
361 * (imgp->ip_strendp) start of remaining copy area
362 * (imgp->ip_argspace) space remaining of NCARGS
363 * (imgp->ip_applec) Initial applev[0]
364 *
365 * Note: We have to do this before the initial namei() since in the
366 * path contains symbolic links, namei() will overwrite the
367 * original path buffer contents. If the last symbolic link
368 * resolved was a relative pathname, we would lose the original
369 * "path", which could be an absolute pathname. This might be
370 * unacceptable for dyld.
371 */
372 static int
373 exec_save_path(struct image_params *imgp, user_addr_t path, int seg, const char **excpath)
374 {
375 int error;
376 size_t len;
377 char *kpath;
378
379 // imgp->ip_strings can come out of a cache, so we need to obliterate the
380 // old path.
381 memset(imgp->ip_strings, '\0', strlen(EXECUTABLE_KEY) + MAXPATHLEN);
382
383 len = MIN(MAXPATHLEN, imgp->ip_strspace);
384
385 switch(seg) {
386 case UIO_USERSPACE32:
387 case UIO_USERSPACE64: /* Same for copyin()... */
388 error = copyinstr(path, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
389 break;
390 case UIO_SYSSPACE:
391 kpath = CAST_DOWN(char *,path); /* SAFE */
392 error = copystr(kpath, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
393 break;
394 default:
395 error = EFAULT;
396 break;
397 }
398
399 if (!error) {
400 bcopy(EXECUTABLE_KEY, imgp->ip_strings, strlen(EXECUTABLE_KEY));
401 len += strlen(EXECUTABLE_KEY);
402
403 imgp->ip_strendp += len;
404 imgp->ip_strspace -= len;
405
406 if (excpath) {
407 *excpath = imgp->ip_strings + strlen(EXECUTABLE_KEY);
408 }
409 }
410
411 return(error);
412 }
413
414 /*
415 * exec_reset_save_path
416 *
417 * If we detect a shell script, we need to reset the string area
418 * state so that the interpreter can be saved onto the stack.
419
420 * Parameters; struct image_params * image parameter block
421 *
422 * Returns: int 0 Success
423 *
424 * Implicit returns:
425 * (imgp->ip_strings) saved path
426 * (imgp->ip_strspace) space remaining in ip_strings
427 * (imgp->ip_strendp) start of remaining copy area
428 * (imgp->ip_argspace) space remaining of NCARGS
429 *
430 */
431 static int
432 exec_reset_save_path(struct image_params *imgp)
433 {
434 imgp->ip_strendp = imgp->ip_strings;
435 imgp->ip_argspace = NCARGS;
436 imgp->ip_strspace = ( NCARGS + PAGE_SIZE );
437
438 return (0);
439 }
440
441 /*
442 * exec_shell_imgact
443 *
444 * Image activator for interpreter scripts. If the image begins with
445 * the characters "#!", then it is an interpreter script. Verify the
446 * length of the script line indicating the interpreter is not in
447 * excess of the maximum allowed size. If this is the case, then
448 * break out the arguments, if any, which are separated by white
449 * space, and copy them into the argument save area as if they were
450 * provided on the command line before all other arguments. The line
451 * ends when we encounter a comment character ('#') or newline.
452 *
453 * Parameters; struct image_params * image parameter block
454 *
455 * Returns: -1 not an interpreter (keep looking)
456 * -3 Success: interpreter: relookup
457 * >0 Failure: interpreter: error number
458 *
459 * A return value other than -1 indicates subsequent image activators should
460 * not be given the opportunity to attempt to activate the image.
461 */
static int
exec_shell_imgact(struct image_params *imgp)
{
	char *vdata = imgp->ip_vdata;
	char *ihp;
	char *line_startp, *line_endp;
	char *interp;

	/*
	 * Make sure it's a shell script. If we've already redirected
	 * from an interpreted file once, don't do it again.
	 */
	if (vdata[0] != '#' ||
	    vdata[1] != '!' ||
	    (imgp->ip_flags & IMGPF_INTERPRET) != 0) {
		return (-1);
	}

	if (imgp->ip_origcputype != 0) {
		/* Fat header previously matched, don't allow shell script inside */
		return (-1);
	}

	imgp->ip_flags |= IMGPF_INTERPRET;
	imgp->ip_interp_sugid_fd = -1;
	imgp->ip_interp_buffer[0] = '\0';

	/* Check to see if SUGID scripts are permitted.  If they aren't then
	 * clear the SUGID bits.
	 * imgp->ip_vattr is known to be valid.
	 */
	if (sugid_scripts == 0) {
		imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID);
	}

	/* Try to find the first non-whitespace character */
	for( ihp = &vdata[2]; ihp < &vdata[IMG_SHSIZE]; ihp++ ) {
		if (IS_EOL(*ihp)) {
			/* Did not find interpreter, "#!\n" */
			return (ENOEXEC);
		} else if (IS_WHITESPACE(*ihp)) {
			/* Whitespace, like "#!    /bin/sh\n", keep going. */
		} else {
			/* Found start of interpreter */
			break;
		}
	}

	if (ihp == &vdata[IMG_SHSIZE]) {
		/* All whitespace, like "#!           " */
		return (ENOEXEC);
	}

	line_startp = ihp;

	/* Try to find the end of the interpreter+args string */
	for ( ; ihp < &vdata[IMG_SHSIZE]; ihp++ ) {
		if (IS_EOL(*ihp)) {
			/* Got it */
			break;
		} else {
			/* Still part of interpreter or args */
		}
	}

	if (ihp == &vdata[IMG_SHSIZE]) {
		/* A long line, like "#! blah blah blah" without end */
		return (ENOEXEC);
	}

	/* Backtrack until we find the last non-whitespace */
	/* (cannot underrun: line_startp is known non-whitespace) */
	while (IS_EOL(*ihp) || IS_WHITESPACE(*ihp)) {
		ihp--;
	}

	/* The character after the last non-whitespace is our logical end of line */
	line_endp = ihp + 1;

	/*
	 * Now we have pointers to the usable part of:
	 *
	 * "#!  /usr/bin/int first    second   third    \n"
	 *      ^ line_startp                       ^ line_endp
	 */

	/* copy the interpreter name (up to the first whitespace only) */
	interp = imgp->ip_interp_buffer;
	for ( ihp = line_startp; (ihp < line_endp) && !IS_WHITESPACE(*ihp); ihp++)
		*interp++ = *ihp;
	*interp = '\0';

	/* Re-seed the strings area so the interpreter path replaces the
	 * script path as applev[0]. */
	exec_reset_save_path(imgp);
	exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_buffer),
							UIO_SYSSPACE, NULL);

	/* Copy the entire interpreter + args for later processing into argv[] */
	/* (overwrites the name-only copy above with the full line) */
	interp = imgp->ip_interp_buffer;
	for ( ihp = line_startp; (ihp < line_endp); ihp++)
		*interp++ = *ihp;
	*interp = '\0';

#if !SECURE_KERNEL
	/*
	 * If we have an SUID or SGID script, create a file descriptor
	 * from the vnode and pass /dev/fd/%d instead of the actual
	 * path name so that the script does not get opened twice
	 */
	if (imgp->ip_origvattr->va_mode & (VSUID | VSGID)) {
		proc_t p;
		struct fileproc *fp;
		int fd;
		int error;

		p = vfs_context_proc(imgp->ip_vfs_context);
		error = falloc(p, &fp, &fd, imgp->ip_vfs_context);
		if (error)
			return(error);

		fp->f_fglob->fg_flag = FREAD;
		fp->f_fglob->fg_ops = &vnops;
		fp->f_fglob->fg_data = (caddr_t)imgp->ip_vp;

		/* Publish the fd into the table and drop our extra use count;
		 * take a vnode reference to back the open file. */
		proc_fdlock(p);
		procfdtbl_releasefd(p, fd, NULL);
		fp_drop(p, fd, fp, 1);
		proc_fdunlock(p);
		vnode_ref(imgp->ip_vp);

		imgp->ip_interp_sugid_fd = fd;
	}
#endif

	return (-3);
}
596
597
598
599 /*
600 * exec_fat_imgact
601 *
602 * Image activator for fat 1.0 binaries. If the binary is fat, then we
603 * need to select an image from it internally, and make that the image
604 * we are going to attempt to execute. At present, this consists of
605 * reloading the first page for the image with a first page from the
606 * offset location indicated by the fat header.
607 *
608 * Parameters; struct image_params * image parameter block
609 *
610 * Returns: -1 not a fat binary (keep looking)
611 * -2 Success: encapsulated binary: reread
612 * >0 Failure: error number
613 *
614 * Important: This image activator is byte order neutral.
615 *
616 * Note: A return value other than -1 indicates subsequent image
617 * activators should not be given the opportunity to attempt
618 * to activate the image.
619 *
620 * If we find an encapsulated binary, we make no assertions
621 * about its validity; instead, we leave that up to a rescan
622 * for an activator to claim it, and, if it is claimed by one,
623 * that activator is responsible for determining validity.
624 */
static int
exec_fat_imgact(struct image_params *imgp)
{
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	kauth_cred_t cred = kauth_cred_proc_ref(p);
	struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata;
	struct _posix_spawnattr *psa = NULL;
	struct fat_arch fat_arch;
	int resid, error;
	load_return_t lret;

	if (imgp->ip_origcputype != 0) {
		/* Fat header previously matched, don't allow another fat file inside */
		error = -1; /* not claimed */
		goto bad;
	}

	/* Make sure it's a fat binary (magic is stored big-endian on disk) */
	if (OSSwapBigToHostInt32(fat_header->magic) != FAT_MAGIC) {
		error = -1; /* not claimed */
		goto bad;
	}

	/* imgp->ip_vdata has PAGE_SIZE, zerofilled if the file is smaller */
	lret = fatfile_validate_fatarches((vm_offset_t)fat_header, PAGE_SIZE);
	if (lret != LOAD_SUCCESS) {
		error = load_return_to_errno(lret);
		goto bad;
	}

	/* If posix_spawn binprefs exist, respect those prefs. */
	psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
	if (psa != NULL && psa->psa_binprefs[0] != 0) {
		uint32_t pr = 0;

		/* Check each preference listed against all arches in header */
		for (pr = 0; pr < NBINPREFS; pr++) {
			cpu_type_t pref = psa->psa_binprefs[pr];
			if (pref == 0) {
				/* No suitable arch in the pref list */
				error = EBADARCH;
				goto bad;
			}

			if (pref == CPU_TYPE_ANY) {
				/* Fall through to regular grading */
				goto regular_grading;
			}

			lret = fatfile_getbestarch_for_cputype(pref,
							(vm_offset_t)fat_header,
							PAGE_SIZE,
							&fat_arch);
			if (lret == LOAD_SUCCESS) {
				goto use_arch;
			}
		}

		/* Requested binary preference was not honored */
		error = EBADEXEC;
		goto bad;
	}

regular_grading:
	/* Look up our preferred architecture in the fat file. */
	lret = fatfile_getbestarch((vm_offset_t)fat_header,
				PAGE_SIZE,
				&fat_arch);
	if (lret != LOAD_SUCCESS) {
		error = load_return_to_errno(lret);
		goto bad;
	}

use_arch:
	/* Read the Mach-O header out of fat_arch */
	error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata,
			PAGE_SIZE, fat_arch.offset,
			UIO_SYSSPACE, (IO_UNIT|IO_NODELOCKED),
			cred, &resid, p);
	if (error) {
		goto bad;
	}

	/* Zero the tail of the page if the read came up short, so later
	 * activators never see stale bytes. */
	if (resid) {
		memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
	}

	/* Success.  Indicate we have identified an encapsulated binary */
	error = -2;
	imgp->ip_arch_offset = (user_size_t)fat_arch.offset;
	imgp->ip_arch_size = (user_size_t)fat_arch.size;
	imgp->ip_origcputype = fat_arch.cputype;
	imgp->ip_origcpusubtype = fat_arch.cpusubtype;

bad:
	/* Always drop the credential reference taken at entry */
	kauth_cred_unref(&cred);
	return (error);
}
723
/*
 * activate_exec_state
 *
 * Reset task/thread machine state for the newly loaded image: clear the
 * dyld info, set the task's 64-bit-ness (mirrored into p->p_flag), wipe
 * the thread state, apply any thread state supplied by the image loader,
 * and finally set the entry point.
 *
 * Returns: KERN_SUCCESS or the failing thread-state call's return value.
 */
static int
activate_exec_state(task_t task, proc_t p, thread_t thread, load_result_t *result)
{
	int ret;

	task_set_dyld_info(task, MACH_VM_MIN_ADDRESS, 0);
	if (result->is64bit) {
		task_set_64bit(task, TRUE);
		OSBitOrAtomic(P_LP64, &p->p_flag);
	} else {
		task_set_64bit(task, FALSE);
		OSBitAndAtomic(~((uint32_t)P_LP64), &p->p_flag);
	}

	ret = thread_state_initialize(thread);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	if (result->threadstate) {
		uint32_t *ts = result->threadstate;
		uint32_t total_size = result->threadstate_sz;

		/*
		 * The loader-provided thread state is a sequence of
		 * (flavor, size, data...) records; size counts uint32_t
		 * words of data, and total_size is in bytes.
		 */
		while (total_size > 0) {
			uint32_t flavor = *ts++;
			uint32_t size = *ts++;

			ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
			if (ret) {
				return ret;
			}
			ts += size;
			total_size -= (size + 2) * sizeof(uint32_t);
		}
	}

	thread_setentrypoint(thread, result->entry_point);

	return KERN_SUCCESS;
}
764
765
766 /*
767 * Set p->p_comm and p->p_name to the name passed to exec
768 */
769 static void
770 set_proc_name(struct image_params *imgp, proc_t p)
771 {
772 int p_name_len = sizeof(p->p_name) - 1;
773
774 if (imgp->ip_ndp->ni_cnd.cn_namelen > p_name_len) {
775 imgp->ip_ndp->ni_cnd.cn_namelen = p_name_len;
776 }
777
778 bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_name,
779 (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
780 p->p_name[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
781
782 if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN) {
783 imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
784 }
785
786 bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
787 (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
788 p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
789 }
790
791 static uint64_t get_va_fsid(struct vnode_attr *vap)
792 {
793 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
794 return *(uint64_t *)&vap->va_fsid64;
795 } else {
796 return vap->va_fsid;
797 }
798 }
799
800 /*
801 * exec_mach_imgact
802 *
803 * Image activator for mach-o 1.0 binaries.
804 *
805 * Parameters; struct image_params * image parameter block
806 *
807 * Returns: -1 not a fat binary (keep looking)
808 * -2 Success: encapsulated binary: reread
809 * >0 Failure: error number
810 * EBADARCH Mach-o binary, but with an unrecognized
811 * architecture
812 * ENOMEM No memory for child process after -
813 * can only happen after vfork()
814 *
815 * Important: This image activator is NOT byte order neutral.
816 *
817 * Note: A return value other than -1 indicates subsequent image
818 * activators should not be given the opportunity to attempt
819 * to activate the image.
820 *
821 * TODO: More gracefully handle failures after vfork
822 */
823 static int
824 exec_mach_imgact(struct image_params *imgp)
825 {
826 struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
827 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
828 int error = 0;
829 task_t task;
830 task_t new_task = NULL; /* protected by vfexec */
831 thread_t thread;
832 struct uthread *uthread;
833 vm_map_t old_map = VM_MAP_NULL;
834 vm_map_t map = VM_MAP_NULL;
835 load_return_t lret;
836 load_result_t load_result;
837 struct _posix_spawnattr *psa = NULL;
838 int spawn = (imgp->ip_flags & IMGPF_SPAWN);
839 int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
840 int exec = (imgp->ip_flags & IMGPF_EXEC);
841 os_reason_t exec_failure_reason = OS_REASON_NULL;
842
843 /*
844 * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
845 * is a reserved field on the end, so for the most part, we can
846 * treat them as if they were identical. Reverse-endian Mach-O
847 * binaries are recognized but not compatible.
848 */
849 if ((mach_header->magic == MH_CIGAM) ||
850 (mach_header->magic == MH_CIGAM_64)) {
851 error = EBADARCH;
852 goto bad;
853 }
854
855 if ((mach_header->magic != MH_MAGIC) &&
856 (mach_header->magic != MH_MAGIC_64)) {
857 error = -1;
858 goto bad;
859 }
860
861 if (mach_header->filetype != MH_EXECUTE) {
862 error = -1;
863 goto bad;
864 }
865
866 if (imgp->ip_origcputype != 0) {
867 /* Fat header previously had an idea about this thin file */
868 if (imgp->ip_origcputype != mach_header->cputype ||
869 imgp->ip_origcpusubtype != mach_header->cpusubtype) {
870 error = EBADARCH;
871 goto bad;
872 }
873 } else {
874 imgp->ip_origcputype = mach_header->cputype;
875 imgp->ip_origcpusubtype = mach_header->cpusubtype;
876 }
877
878 task = current_task();
879 thread = current_thread();
880 uthread = get_bsdthread_info(thread);
881
882 if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64)
883 imgp->ip_flags |= IMGPF_IS_64BIT;
884
885 /* If posix_spawn binprefs exist, respect those prefs. */
886 psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
887 if (psa != NULL && psa->psa_binprefs[0] != 0) {
888 int pr = 0;
889 for (pr = 0; pr < NBINPREFS; pr++) {
890 cpu_type_t pref = psa->psa_binprefs[pr];
891 if (pref == 0) {
892 /* No suitable arch in the pref list */
893 error = EBADARCH;
894 goto bad;
895 }
896
897 if (pref == CPU_TYPE_ANY) {
898 /* Jump to regular grading */
899 goto grade;
900 }
901
902 if (pref == imgp->ip_origcputype) {
903 /* We have a match! */
904 goto grade;
905 }
906 }
907 error = EBADARCH;
908 goto bad;
909 }
910 grade:
911 if (!grade_binary(imgp->ip_origcputype, imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK)) {
912 error = EBADARCH;
913 goto bad;
914 }
915
916 /* Copy in arguments/environment from the old process */
917 error = exec_extract_strings(imgp);
918 if (error)
919 goto bad;
920
921 AUDIT_ARG(argv, imgp->ip_startargv, imgp->ip_argc,
922 imgp->ip_endargv - imgp->ip_startargv);
923 AUDIT_ARG(envv, imgp->ip_endargv, imgp->ip_envc,
924 imgp->ip_endenvv - imgp->ip_endargv);
925
926 /*
927 * We are being called to activate an image subsequent to a vfork()
928 * operation; in this case, we know that our task, thread, and
929 * uthread are actually those of our parent, and our proc, which we
930 * obtained indirectly from the image_params vfs_context_t, is the
931 * new child process.
932 */
933 if (vfexec) {
934 imgp->ip_new_thread = fork_create_child(task, NULL, p, FALSE, (imgp->ip_flags & IMGPF_IS_64BIT), FALSE);
935 /* task and thread ref returned, will be released in __mac_execve */
936 if (imgp->ip_new_thread == NULL) {
937 error = ENOMEM;
938 goto bad;
939 }
940 }
941
942
943 /* reset local idea of thread, uthread, task */
944 thread = imgp->ip_new_thread;
945 uthread = get_bsdthread_info(thread);
946 task = new_task = get_threadtask(thread);
947
948 /*
949 * Load the Mach-O file.
950 *
951 * NOTE: An error after this point indicates we have potentially
952 * destroyed or overwritten some process state while attempting an
953 * execve() following a vfork(), which is an unrecoverable condition.
954 * We send the new process an immediate SIGKILL to avoid it executing
955 * any instructions in the mutated address space. For true spawns,
956 * this is not the case, and "too late" is still not too late to
957 * return an error code to the parent process.
958 */
959
960 /*
961 * Actually load the image file we previously decided to load.
962 */
963 lret = load_machfile(imgp, mach_header, thread, &map, &load_result);
964 if (lret != LOAD_SUCCESS) {
965 error = load_return_to_errno(lret);
966
967 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
968 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO, 0, 0);
969 if (lret == LOAD_BADMACHO_UPX) {
970 /* set anything that might be useful in the crash report */
971 set_proc_name(imgp, p);
972
973 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_UPX);
974 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
975 exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;
976 } else {
977 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);
978 }
979
980 goto badtoolate;
981 }
982
983 proc_lock(p);
984 p->p_cputype = imgp->ip_origcputype;
985 p->p_cpusubtype = imgp->ip_origcpusubtype;
986 proc_unlock(p);
987
988 vm_map_set_user_wire_limit(map, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
989
990 /*
991 * Set code-signing flags if this binary is signed, or if parent has
992 * requested them on exec.
993 */
994 if (load_result.csflags & CS_VALID) {
995 imgp->ip_csflags |= load_result.csflags &
996 (CS_VALID|CS_SIGNED|CS_DEV_CODE|
997 CS_HARD|CS_KILL|CS_RESTRICT|CS_ENFORCEMENT|CS_REQUIRE_LV|
998 CS_ENTITLEMENTS_VALIDATED|CS_DYLD_PLATFORM|
999 CS_ENTITLEMENT_FLAGS|
1000 CS_EXEC_SET_HARD|CS_EXEC_SET_KILL|CS_EXEC_SET_ENFORCEMENT);
1001 } else {
1002 imgp->ip_csflags &= ~CS_VALID;
1003 }
1004
1005 if (p->p_csflags & CS_EXEC_SET_HARD)
1006 imgp->ip_csflags |= CS_HARD;
1007 if (p->p_csflags & CS_EXEC_SET_KILL)
1008 imgp->ip_csflags |= CS_KILL;
1009 if (p->p_csflags & CS_EXEC_SET_ENFORCEMENT)
1010 imgp->ip_csflags |= CS_ENFORCEMENT;
1011 if (p->p_csflags & CS_EXEC_INHERIT_SIP) {
1012 if (p->p_csflags & CS_INSTALLER)
1013 imgp->ip_csflags |= CS_INSTALLER;
1014 if (p->p_csflags & CS_DATAVAULT_CONTROLLER)
1015 imgp->ip_csflags |= CS_DATAVAULT_CONTROLLER;
1016 if (p->p_csflags & CS_NVRAM_UNRESTRICTED)
1017 imgp->ip_csflags |= CS_NVRAM_UNRESTRICTED;
1018 }
1019
1020 /*
1021 * Set up the system reserved areas in the new address space.
1022 */
1023 vm_map_exec(map, task, load_result.is64bit, (void *)p->p_fd->fd_rdir, cpu_type());
1024
1025 /*
1026 * Close file descriptors which specify close-on-exec.
1027 */
1028 fdexec(p, psa != NULL ? psa->psa_flags : 0, exec);
1029
1030 /*
1031 * deal with set[ug]id.
1032 */
1033 error = exec_handle_sugid(imgp);
1034 if (error) {
1035 vm_map_deallocate(map);
1036
1037 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1038 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE, 0, 0);
1039 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE);
1040 goto badtoolate;
1041 }
1042
1043 /*
1044 * Commit to new map.
1045 *
1046 * Swap the new map for the old for target task, which consumes
1047 * our new map reference but each leaves us responsible for the
1048 * old_map reference. That lets us get off the pmap associated
1049 * with it, and then we can release it.
1050 *
1051 * The map needs to be set on the target task which is different
1052 * than current task, thus swap_task_map is used instead of
1053 * vm_map_switch.
1054 */
1055 old_map = swap_task_map(task, thread, map);
1056 vm_map_deallocate(old_map);
1057 old_map = NULL;
1058
1059 lret = activate_exec_state(task, p, thread, &load_result);
1060 if (lret != KERN_SUCCESS) {
1061
1062 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1063 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE, 0, 0);
1064 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE);
1065 goto badtoolate;
1066 }
1067
1068 /*
1069 * deal with voucher on exec-calling thread.
1070 */
1071 if (imgp->ip_new_thread == NULL)
1072 thread_set_mach_voucher(current_thread(), IPC_VOUCHER_NULL);
1073
1074 /* Make sure we won't interrupt ourself signalling a partial process */
1075 if (!vfexec && !spawn && (p->p_lflag & P_LTRACED))
1076 psignal(p, SIGTRAP);
1077
1078 if (load_result.unixproc &&
1079 create_unix_stack(get_task_map(task),
1080 &load_result,
1081 p) != KERN_SUCCESS) {
1082 error = load_return_to_errno(LOAD_NOSPACE);
1083
1084 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1085 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC, 0, 0);
1086 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC);
1087 goto badtoolate;
1088 }
1089
1090 error = exec_add_apple_strings(imgp, &load_result);
1091 if (error) {
1092
1093 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1094 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT, 0, 0);
1095 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT);
1096 goto badtoolate;
1097 }
1098
1099 /* Switch to target task's map to copy out strings */
1100 old_map = vm_map_switch(get_task_map(task));
1101
1102 if (load_result.unixproc) {
1103 user_addr_t ap;
1104
1105 /*
1106 * Copy the strings area out into the new process address
1107 * space.
1108 */
1109 ap = p->user_stack;
1110 error = exec_copyout_strings(imgp, &ap);
1111 if (error) {
1112 vm_map_switch(old_map);
1113
1114 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1115 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS, 0, 0);
1116 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS);
1117 goto badtoolate;
1118 }
1119 /* Set the stack */
1120 thread_setuserstack(thread, ap);
1121 }
1122
1123 if (load_result.dynlinker) {
1124 uint64_t ap;
1125 int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4;
1126
1127 /* Adjust the stack */
1128 ap = thread_adjuserstack(thread, -new_ptr_size);
1129 error = copyoutptr(load_result.mach_header, ap, new_ptr_size);
1130
1131 if (error) {
1132 vm_map_switch(old_map);
1133
1134 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1135 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER, 0, 0);
1136 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER);
1137 goto badtoolate;
1138 }
1139 task_set_dyld_info(task, load_result.all_image_info_addr,
1140 load_result.all_image_info_size);
1141 }
1142
1143 /* Avoid immediate VM faults back into kernel */
1144 exec_prefault_data(p, imgp, &load_result);
1145
1146 vm_map_switch(old_map);
1147
1148 /* Stop profiling */
1149 stopprofclock(p);
1150
1151 /*
1152 * Reset signal state.
1153 */
1154 execsigs(p, thread);
1155
1156 /*
1157 * need to cancel async IO requests that can be cancelled and wait for those
1158 * already active. MAY BLOCK!
1159 */
1160 _aio_exec( p );
1161
1162 #if SYSV_SHM
1163 /* FIXME: Till vmspace inherit is fixed: */
1164 if (!vfexec && p->vm_shm)
1165 shmexec(p);
1166 #endif
1167 #if SYSV_SEM
1168 /* Clean up the semaphores */
1169 semexit(p);
1170 #endif
1171
1172 /*
1173 * Remember file name for accounting.
1174 */
1175 p->p_acflag &= ~AFORK;
1176
1177 set_proc_name(imgp, p);
1178
1179 #if CONFIG_SECLUDED_MEMORY
1180 if (secluded_for_apps &&
1181 load_result.platform_binary) {
1182 if (strncmp(p->p_name,
1183 "Camera",
1184 sizeof (p->p_name)) == 0) {
1185 task_set_could_use_secluded_mem(task, TRUE);
1186 } else {
1187 task_set_could_use_secluded_mem(task, FALSE);
1188 }
1189 if (strncmp(p->p_name,
1190 "mediaserverd",
1191 sizeof (p->p_name)) == 0) {
1192 task_set_could_also_use_secluded_mem(task, TRUE);
1193 }
1194 }
1195 #endif /* CONFIG_SECLUDED_MEMORY */
1196
1197 pal_dbg_set_task_name(task);
1198
1199 /*
1200 * The load result will have already been munged by AMFI to include the
1201 * platform binary flag if boot-args dictated it (AMFI will mark anything
1202 * that doesn't go through the upcall path as a platform binary if its
1203 * enforcement is disabled).
1204 */
1205 if (load_result.platform_binary) {
1206 if (cs_debug) {
1207 printf("setting platform binary on task: pid = %d\n", p->p_pid);
1208 }
1209
1210 /*
1211 * We must use 'task' here because the proc's task has not yet been
1212 * switched to the new one.
1213 */
1214 task_set_platform_binary(task, TRUE);
1215 } else {
1216 if (cs_debug) {
1217 printf("clearing platform binary on task: pid = %d\n", p->p_pid);
1218 }
1219
1220 task_set_platform_binary(task, FALSE);
1221 }
1222
1223 #if DEVELOPMENT || DEBUG
1224 /*
1225 * Update the pid an proc name for importance base if any
1226 */
1227 task_importance_update_owner_info(task);
1228 #endif
1229
1230 memcpy(&p->p_uuid[0], &load_result.uuid[0], sizeof(p->p_uuid));
1231
1232 #if CONFIG_DTRACE
1233 dtrace_proc_exec(p);
1234 #endif
1235
1236 if (kdebug_enable) {
1237 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
1238
1239 /*
1240 * Collect the pathname for tracing
1241 */
1242 kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
1243
1244 uintptr_t fsid = 0, fileid = 0;
1245 if (imgp->ip_vattr) {
1246 uint64_t fsid64 = get_va_fsid(imgp->ip_vattr);
1247 fsid = fsid64;
1248 fileid = imgp->ip_vattr->va_fileid;
1249 // check for (unexpected) overflow and trace zero in that case
1250 if (fsid != fsid64 || fileid != imgp->ip_vattr->va_fileid) {
1251 fsid = fileid = 0;
1252 }
1253 }
1254 KERNEL_DEBUG_CONSTANT1(TRACE_DATA_EXEC | DBG_FUNC_NONE,
1255 p->p_pid , fsid, fileid, 0, (uintptr_t)thread_tid(thread));
1256 KERNEL_DEBUG_CONSTANT1(TRACE_STRING_EXEC | DBG_FUNC_NONE,
1257 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (uintptr_t)thread_tid(thread));
1258 }
1259
1260 /*
1261 * If posix_spawned with the START_SUSPENDED flag, stop the
1262 * process before it runs.
1263 */
1264 if (imgp->ip_px_sa != NULL) {
1265 psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
1266 if (psa->psa_flags & POSIX_SPAWN_START_SUSPENDED) {
1267 proc_lock(p);
1268 p->p_stat = SSTOP;
1269 proc_unlock(p);
1270 (void) task_suspend_internal(task);
1271 }
1272 }
1273
1274 /*
1275 * mark as execed, wakeup the process that vforked (if any) and tell
1276 * it that it now has its own resources back
1277 */
1278 OSBitOrAtomic(P_EXEC, &p->p_flag);
1279 proc_resetregister(p);
1280 if (p->p_pptr && (p->p_lflag & P_LPPWAIT)) {
1281 proc_lock(p);
1282 p->p_lflag &= ~P_LPPWAIT;
1283 proc_unlock(p);
1284 wakeup((caddr_t)p->p_pptr);
1285 }
1286
1287 /*
1288 * Pay for our earlier safety; deliver the delayed signals from
1289 * the incomplete vfexec process now that it's complete.
1290 */
1291 if (vfexec && (p->p_lflag & P_LTRACED)) {
1292 psignal_vfork(p, new_task, thread, SIGTRAP);
1293 }
1294
1295 goto done;
1296
1297 badtoolate:
1298 /* Don't allow child process to execute any instructions */
1299 if (!spawn) {
1300 if (vfexec) {
1301 assert(exec_failure_reason != OS_REASON_NULL);
1302 psignal_vfork_with_reason(p, new_task, thread, SIGKILL, exec_failure_reason);
1303 exec_failure_reason = OS_REASON_NULL;
1304 } else {
1305 assert(exec_failure_reason != OS_REASON_NULL);
1306 psignal_with_reason(p, SIGKILL, exec_failure_reason);
1307 exec_failure_reason = OS_REASON_NULL;
1308
1309 if (exec) {
1310 /* Terminate the exec copy task */
1311 task_terminate_internal(task);
1312 }
1313 }
1314
1315 /* We can't stop this system call at this point, so just pretend we succeeded */
1316 error = 0;
1317 } else {
1318 os_reason_free(exec_failure_reason);
1319 exec_failure_reason = OS_REASON_NULL;
1320 }
1321
1322 done:
1323 if (load_result.threadstate) {
1324 kfree(load_result.threadstate, load_result.threadstate_sz);
1325 load_result.threadstate = NULL;
1326 }
1327
1328 bad:
1329 /* If we hit this, we likely would have leaked an exit reason */
1330 assert(exec_failure_reason == OS_REASON_NULL);
1331 return(error);
1332 }
1333
1334
1335
1336
1337 /*
1338 * Our image activator table; this is the table of the image types we are
1339 * capable of loading. We list them in order of preference to ensure the
1340 * fastest image load speed.
1341 *
1342 * XXX hardcoded, for now; should use linker sets
1343 */
struct execsw {
	int (*ex_imgact)(struct image_params *);	/* activator: 0 on success, -1 not claimed,
							 * -2 encapsulated binary, -3 interpreter,
							 * or an errno (see exec_activate_image) */
	const char *ex_name;				/* human-readable image type name */
} execsw[] = {
	{ exec_mach_imgact, "Mach-o Binary" },		/* tried first: the common case */
	{ exec_fat_imgact, "Fat Binary" },
	{ exec_shell_imgact, "Interpreter Script" },
	{ NULL, NULL}					/* sentinel terminating the scan */
};
1353
1354
1355 /*
1356 * exec_activate_image
1357 *
1358 * Description: Iterate through the available image activators, and activate
1359 * the image associated with the imgp structure. We start with
1360 * the activator for Mach-o binaries followed by that for Fat binaries
1361 * for Interpreter scripts.
1362 *
1363 * Parameters: struct image_params * Image parameter block
1364 *
1365 * Returns: 0 Success
1366 * EBADEXEC The executable is corrupt/unknown
1367 * execargs_alloc:EINVAL Invalid argument
1368 * execargs_alloc:EACCES Permission denied
1369 * execargs_alloc:EINTR Interrupted function
1370 * execargs_alloc:ENOMEM Not enough space
1371 * exec_save_path:EFAULT Bad address
1372 * exec_save_path:ENAMETOOLONG Filename too long
1373 * exec_check_permissions:EACCES Permission denied
1374 * exec_check_permissions:ENOEXEC Executable file format error
1375 * exec_check_permissions:ETXTBSY Text file busy [misuse of error code]
1376 * exec_check_permissions:???
1377 * namei:???
1378 * vn_rdwr:??? [anything vn_rdwr can return]
1379 * <ex_imgact>:??? [anything an imgact can return]
1380 * EDEADLK Process is being terminated
1381 */
static int
exec_activate_image(struct image_params *imgp)
{
	struct nameidata *ndp = NULL;
	const char *excpath;
	int error;
	int resid;
	int once = 1;	/* save SGUID-ness for interpreted files */
	int i;
	int itercount = 0;	/* bounds interpreter/fat re-dispatch loops */
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);

	/* Allocate the argument/string area used by all activators */
	error = execargs_alloc(imgp);
	if (error)
		goto bad_notrans;

	/* Copy in the executable path; excpath points into the saved copy */
	error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath);
	if (error) {
		goto bad_notrans;
	}

	/* Use excpath, which contains the copyin-ed exec path */
	DTRACE_PROC1(exec, uintptr_t, excpath);

	MALLOC(ndp, struct nameidata *, sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO);
	if (ndp == NULL) {
		error = ENOMEM;
		goto bad_notrans;
	}

	NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
		   UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);

again:
	error = namei(ndp);
	if (error)
		goto bad_notrans;
	imgp->ip_ndp = ndp;	/* successful namei(); call nameidone() later */
	imgp->ip_vp = ndp->ni_vp;	/* if set, need to vnode_put() at some point */

	/*
	 * Before we start the transition from binary A to binary B, make
	 * sure another thread hasn't started exiting the process.  We grab
	 * the proc lock to check p_lflag initially, and the transition
	 * mechanism ensures that the value doesn't change after we release
	 * the lock.
	 */
	proc_lock(p);
	if (p->p_lflag & P_LEXIT) {
		error = EDEADLK;
		proc_unlock(p);
		goto bad_notrans;
	}
	error = proc_transstart(p, 1, 0);
	proc_unlock(p);
	if (error)
		goto bad_notrans;

	error = exec_check_permissions(imgp);
	if (error)
		goto bad;

	/* Copy; avoid invocation of an interpreter overwriting the original */
	if (once) {
		once = 0;
		*imgp->ip_origvattr = *imgp->ip_vattr;
	}

	/* Read the first page of the file so activators can sniff the header */
	error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, PAGE_SIZE, 0,
			UIO_SYSSPACE, IO_NODELOCKED,
			vfs_context_ucred(imgp->ip_vfs_context),
			&resid, vfs_context_proc(imgp->ip_vfs_context));
	if (error)
		goto bad;

	/* Zero-fill a short read so activators never see stale buffer data */
	if (resid) {
		memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
	}

encapsulated_binary:
	/* Limit the number of iterations we will attempt on each binary */
	if (++itercount > EAI_ITERLIMIT) {
		error = EBADEXEC;
		goto bad;
	}
	/* -1 means "not claimed yet": keep offering the image to activators */
	error = -1;
	for(i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) {

		error = (*execsw[i].ex_imgact)(imgp);

		switch (error) {
		/* case -1: not claimed: continue */
		case -2:	/* Encapsulated binary, imgp->ip_XXX set for next iteration */
			goto encapsulated_binary;

		case -3:	/* Interpreter */
#if CONFIG_MACF
			/*
			 * Copy the script label for later use. Note that
			 * the label can be different when the script is
			 * actually read by the interpreter.
			 */
			if (imgp->ip_scriptlabelp)
				mac_vnode_label_free(imgp->ip_scriptlabelp);
			imgp->ip_scriptlabelp = mac_vnode_label_alloc();
			if (imgp->ip_scriptlabelp == NULL) {
				error = ENOMEM;
				break;
			}
			mac_vnode_label_copy(imgp->ip_vp->v_label,
					     imgp->ip_scriptlabelp);

			/*
			 * Take a ref of the script vnode for later use.
			 */
			if (imgp->ip_scriptvp)
				vnode_put(imgp->ip_scriptvp);
			if (vnode_getwithref(imgp->ip_vp) == 0)
				imgp->ip_scriptvp = imgp->ip_vp;
#endif

			/* Release the script's lookup state before re-looking-up */
			nameidone(ndp);

			vnode_put(imgp->ip_vp);
			imgp->ip_vp = NULL;	/* already put */
			imgp->ip_ndp = NULL; /* already nameidone */

			/* Use excpath, which exec_shell_imgact reset to the interpreter */
			NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF,
				   UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);

			/* End this transition; "again" restarts one for the interpreter */
			proc_transend(p, 0);
			goto again;

		default:
			break;
		}
	}

	/*
	 * Call out to allow 3rd party notification of exec.
	 * Ignore result of kauth_authorize_fileop call.
	 */
	if (error == 0 && kauth_authorize_fileop_has_listeners()) {
		kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context),
					KAUTH_FILEOP_EXEC,
					(uintptr_t)ndp->ni_vp, 0);
	}
bad:
	proc_transend(p, 0);

bad_notrans:
	/* Cleanup is idempotent: each resource is released only if still held */
	if (imgp->ip_strings)
		execargs_free(imgp);
	if (imgp->ip_ndp)
		nameidone(imgp->ip_ndp);
	if (ndp)
		FREE(ndp, M_TEMP);

	return (error);
}
1543
1544
1545 /*
1546 * exec_handle_spawnattr_policy
1547 *
1548 * Description: Decode and apply the posix_spawn apptype, qos clamp, and watchport ports to the task.
1549 *
1550 * Parameters: proc_t p process to apply attributes to
1551 * int psa_apptype posix spawn attribute apptype
1552 *
1553 * Returns: 0 Success
1554 */
1555 static errno_t
1556 exec_handle_spawnattr_policy(proc_t p, int psa_apptype, uint64_t psa_qos_clamp, uint64_t psa_darwin_role,
1557 ipc_port_t * portwatch_ports, int portwatch_count)
1558 {
1559 int apptype = TASK_APPTYPE_NONE;
1560 int qos_clamp = THREAD_QOS_UNSPECIFIED;
1561 int role = TASK_UNSPECIFIED;
1562
1563 if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) {
1564 int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK;
1565
1566 switch(proctype) {
1567 case POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE:
1568 apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
1569 break;
1570 case POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD:
1571 apptype = TASK_APPTYPE_DAEMON_STANDARD;
1572 break;
1573 case POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE:
1574 apptype = TASK_APPTYPE_DAEMON_ADAPTIVE;
1575 break;
1576 case POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND:
1577 apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
1578 break;
1579 case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT:
1580 apptype = TASK_APPTYPE_APP_DEFAULT;
1581 break;
1582 #if !CONFIG_EMBEDDED
1583 case POSIX_SPAWN_PROC_TYPE_APP_TAL:
1584 apptype = TASK_APPTYPE_APP_TAL;
1585 break;
1586 #endif /* !CONFIG_EMBEDDED */
1587 default:
1588 apptype = TASK_APPTYPE_NONE;
1589 /* TODO: Should an invalid value here fail the spawn? */
1590 break;
1591 }
1592 }
1593
1594 if (psa_qos_clamp != POSIX_SPAWN_PROC_CLAMP_NONE) {
1595 switch (psa_qos_clamp) {
1596 case POSIX_SPAWN_PROC_CLAMP_UTILITY:
1597 qos_clamp = THREAD_QOS_UTILITY;
1598 break;
1599 case POSIX_SPAWN_PROC_CLAMP_BACKGROUND:
1600 qos_clamp = THREAD_QOS_BACKGROUND;
1601 break;
1602 case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE:
1603 qos_clamp = THREAD_QOS_MAINTENANCE;
1604 break;
1605 default:
1606 qos_clamp = THREAD_QOS_UNSPECIFIED;
1607 /* TODO: Should an invalid value here fail the spawn? */
1608 break;
1609 }
1610 }
1611
1612 if (psa_darwin_role != PRIO_DARWIN_ROLE_DEFAULT) {
1613 proc_darwin_role_to_task_role(psa_darwin_role, &role);
1614 }
1615
1616 if (apptype != TASK_APPTYPE_NONE ||
1617 qos_clamp != THREAD_QOS_UNSPECIFIED ||
1618 role != TASK_UNSPECIFIED) {
1619 proc_set_task_spawnpolicy(p->task, apptype, qos_clamp, role,
1620 portwatch_ports, portwatch_count);
1621 }
1622
1623 return (0);
1624 }
1625
1626
1627 /*
1628 * exec_handle_port_actions
1629 *
1630 * Description: Go through the _posix_port_actions_t contents,
1631 * calling task_set_special_port, task_set_exception_ports
1632 * and/or audit_session_spawnjoin for the current task.
1633 *
1634 * Parameters: struct image_params * Image parameter block
1635 *
1636 * Returns: 0 Success
1637 * EINVAL Failure
1638 * ENOTSUP Illegal posix_spawn attr flag was set
1639 */
static errno_t
exec_handle_port_actions(struct image_params *imgp, boolean_t * portwatch_present,
			 ipc_port_t * portwatch_ports)
{
	_posix_spawn_port_actions_t pacts = imgp->ip_px_spa;
#if CONFIG_AUDIT
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
#endif
	_ps_port_action_t *act = NULL;
	task_t task = get_threadtask(imgp->ip_new_thread);	/* ports are applied to the new task */
	ipc_port_t port = NULL;
	errno_t ret = 0;
	int i;
	kern_return_t kr;

	*portwatch_present = FALSE;

	for (i = 0; i < pacts->pspa_count; i++) {
		act = &pacts->pspa_actions[i];

		if (MACH_PORT_VALID(act->new_port)) {
			/*
			 * Copy in a send right from the spawning task's IPC
			 * space; we own that right until it is consumed by
			 * the action below (or released on failure).
			 */
			kr = ipc_object_copyin(get_task_ipcspace(current_task()),
					       act->new_port, MACH_MSG_TYPE_COPY_SEND,
					       (ipc_object_t *) &port);

			if (kr != KERN_SUCCESS) {
				ret = EINVAL;
				goto done;
			}
		} else {
			/* it's NULL or DEAD */
			port = CAST_MACH_NAME_TO_PORT(act->new_port);
		}

		switch (act->port_type) {
		case PSPA_SPECIAL:
			/* Success consumes the send right */
			kr = task_set_special_port(task, act->which, port);

			if (kr != KERN_SUCCESS)
				ret = EINVAL;
			break;

		case PSPA_EXCEPTION:
			kr = task_set_exception_ports(task, act->mask, port,
						      act->behavior, act->flavor);
			if (kr != KERN_SUCCESS)
				ret = EINVAL;
			break;
#if CONFIG_AUDIT
		case PSPA_AU_SESSION:
			ret = audit_session_spawnjoin(p, task, port);
			if (ret) {
				/* audit_session_spawnjoin() has already dropped the reference in case of error. */
				goto done;
			}

			break;
#endif
		case PSPA_IMP_WATCHPORTS:
			if (portwatch_ports != NULL && IPC_PORT_VALID(port)) {
				*portwatch_present = TRUE;
				/* hold on to this till end of spawn */
				portwatch_ports[i] = port;
			} else {
				/* no watchport array, or port invalid: drop our right */
				ipc_port_release_send(port);
			}

			break;
		default:
			ret = EINVAL;
			break;
		}

		if (ret) {
			/* action failed, so release port resources */
			ipc_port_release_send(port);
			break;
		}
	}

done:
	if (0 != ret)
		DTRACE_PROC1(spawn__port__failure, mach_port_name_t, act->new_port);
	return (ret);
}
1725
1726 /*
1727 * exec_handle_file_actions
1728 *
1729 * Description: Go through the _posix_file_actions_t contents applying the
1730 * open, close, and dup2 operations to the open file table for
1731 * the current process.
1732 *
1733 * Parameters: struct image_params * Image parameter block
1734 *
1735 * Returns: 0 Success
1736 * ???
1737 *
1738 * Note: Actions are applied in the order specified, with the credential
1739 * of the parent process. This is done to permit the parent
1740 * process to utilize POSIX_SPAWN_RESETIDS to drop privilege in
1741 * the child following operations the child may in fact not be
1742 * normally permitted to perform.
1743 */
static int
exec_handle_file_actions(struct image_params *imgp, short psa_flags)
{
	int error = 0;
	int action;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	_posix_spawn_file_actions_t px_sfap = imgp->ip_px_sfa;
	int ival[2];		/* dummy retval for system calls) */

	/* First pass: replay each requested action, in order, with the parent's credential */
	for (action = 0; action < px_sfap->psfa_act_count; action++) {
		_psfa_action_t *psfa = &px_sfap->psfa_act_acts[ action];

		switch(psfa->psfaa_type) {
		case PSFA_OPEN: {
			/*
			 * Open is different, in that it requires the use of
			 * a path argument, which is normally copied in from
			 * user space; because of this, we have to support an
			 * open from kernel space that passes an address space
			 * context of UIO_SYSSPACE, and casts the address
			 * argument to a user_addr_t.
			 */
			char *bufp = NULL;
			struct vnode_attr *vap;
			struct nameidata *ndp;
			int mode = psfa->psfaa_openargs.psfao_mode;
			struct dup2_args dup2a;
			struct close_nocancel_args ca;
			int origfd;

			/* One allocation carved into vnode_attr + nameidata */
			MALLOC(bufp, char *, sizeof(*vap) + sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO);
			if (bufp == NULL) {
				error = ENOMEM;
				break;
			}

			vap = (struct vnode_attr *) bufp;
			ndp = (struct nameidata *) (bufp + sizeof(*vap));

			VATTR_INIT(vap);
			/* Mask off all but regular access permissions */
			mode = ((mode &~ p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT;
			VATTR_SET(vap, va_mode, mode & ACCESSPERMS);

			NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
			       CAST_USER_ADDR_T(psfa->psfaa_openargs.psfao_path),
			       imgp->ip_vfs_context);

			error = open1(imgp->ip_vfs_context,
				      ndp,
				      psfa->psfaa_openargs.psfao_oflag,
				      vap,
				      fileproc_alloc_init, NULL,
				      ival);

			FREE(bufp, M_TEMP);

			/*
			 * If there's an error, or we get the right fd by
			 * accident, then drop out here.  This is easier than
			 * reworking all the open code to preallocate fd
			 * slots, and internally taking one as an argument.
			 */
			if (error || ival[0] == psfa->psfaa_filedes)
				break;

			origfd = ival[0];
			/*
			 * If we didn't fall out from an error, we ended up
			 * with the wrong fd; so now we've got to try to dup2
			 * it to the right one.
			 */
			dup2a.from = origfd;
			dup2a.to = psfa->psfaa_filedes;

			/*
			 * The dup2() system call implementation sets
			 * ival to newfd in the success case, but we
			 * can ignore that, since if we didn't get the
			 * fd we wanted, the error will stop us.
			 */
			error = dup2(p, &dup2a, ival);
			if (error)
				break;

			/*
			 * Finally, close the original fd.
			 */
			ca.fd = origfd;

			error = close_nocancel(p, &ca, ival);
			}
			break;

		case PSFA_DUP2: {
			struct dup2_args dup2a;

			/* NOTE: psfao_oflag is overloaded to carry the dup2 target fd */
			dup2a.from = psfa->psfaa_filedes;
			dup2a.to = psfa->psfaa_openargs.psfao_oflag;

			/*
			 * The dup2() system call implementation sets
			 * ival to newfd in the success case, but we
			 * can ignore that, since if we didn't get the
			 * fd we wanted, the error will stop us.
			 */
			error = dup2(p, &dup2a, ival);
			}
			break;

		case PSFA_CLOSE: {
			struct close_nocancel_args ca;

			ca.fd = psfa->psfaa_filedes;

			error = close_nocancel(p, &ca, ival);
			}
			break;

		case PSFA_INHERIT: {
			struct fcntl_nocancel_args fcntla;

			/*
			 * Check to see if the descriptor exists, and
			 * ensure it's -not- marked as close-on-exec.
			 *
			 * Attempting to "inherit" a guarded fd will
			 * result in a error.
			 */
			fcntla.fd = psfa->psfaa_filedes;
			fcntla.cmd = F_GETFD;
			if ((error = fcntl_nocancel(p, &fcntla, ival)) != 0)
				break;

			/* Only issue the F_SETFD when the flag actually needs clearing */
			if ((ival[0] & FD_CLOEXEC) == FD_CLOEXEC) {
				fcntla.fd = psfa->psfaa_filedes;
				fcntla.cmd = F_SETFD;
				fcntla.arg = ival[0] & ~FD_CLOEXEC;
				error = fcntl_nocancel(p, &fcntla, ival);
			}

			}
			break;

		default:
			error = EINVAL;
			break;
		}

		/* All file actions failures are considered fatal, per POSIX */

		if (error) {
			if (PSFA_OPEN == psfa->psfaa_type) {
				DTRACE_PROC1(spawn__open__failure, uintptr_t,
					     psfa->psfaa_openargs.psfao_path);
			} else {
				DTRACE_PROC1(spawn__fd__failure, int, psfa->psfaa_filedes);
			}
			break;
		}
	}

	if (error != 0 || (psa_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) == 0)
		return (error);

	/*
	 * If POSIX_SPAWN_CLOEXEC_DEFAULT is set, behave (during
	 * this spawn only) as if "close on exec" is the default
	 * disposition of all pre-existing file descriptors.  In this case,
	 * the list of file descriptors mentioned in the file actions
	 * are the only ones that can be inherited, so mark them now.
	 *
	 * The actual closing part comes later, in fdexec().
	 */
	proc_fdlock(p);
	for (action = 0; action < px_sfap->psfa_act_count; action++) {
		_psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
		int fd = psfa->psfaa_filedes;

		switch (psfa->psfaa_type) {
		case PSFA_DUP2:
			/* the dup2 target fd (psfao_oflag) is the one inherited */
			fd = psfa->psfaa_openargs.psfao_oflag;
			/*FALLTHROUGH*/
		case PSFA_OPEN:
		case PSFA_INHERIT:
			*fdflags(p, fd) |= UF_INHERIT;
			break;

		case PSFA_CLOSE:
			break;
		}
	}
	proc_fdunlock(p);

	return (0);
}
1940
1941 #if CONFIG_MACF
1942 /*
1943 * exec_spawnattr_getmacpolicyinfo
1944 */
1945 void *
1946 exec_spawnattr_getmacpolicyinfo(const void *macextensions, const char *policyname, size_t *lenp)
1947 {
1948 const struct _posix_spawn_mac_policy_extensions *psmx = macextensions;
1949 int i;
1950
1951 if (psmx == NULL)
1952 return NULL;
1953
1954 for (i = 0; i < psmx->psmx_count; i++) {
1955 const _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
1956 if (strncmp(extension->policyname, policyname, sizeof(extension->policyname)) == 0) {
1957 if (lenp != NULL)
1958 *lenp = extension->datalen;
1959 return extension->datap;
1960 }
1961 }
1962
1963 if (lenp != NULL)
1964 *lenp = 0;
1965 return NULL;
1966 }
1967
1968 static int
1969 spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args, _posix_spawn_mac_policy_extensions_t *psmxp)
1970 {
1971 _posix_spawn_mac_policy_extensions_t psmx = NULL;
1972 int error = 0;
1973 int copycnt = 0;
1974 int i = 0;
1975
1976 *psmxp = NULL;
1977
1978 if (px_args->mac_extensions_size < PS_MAC_EXTENSIONS_SIZE(1) ||
1979 px_args->mac_extensions_size > PAGE_SIZE) {
1980 error = EINVAL;
1981 goto bad;
1982 }
1983
1984 MALLOC(psmx, _posix_spawn_mac_policy_extensions_t, px_args->mac_extensions_size, M_TEMP, M_WAITOK);
1985 if ((error = copyin(px_args->mac_extensions, psmx, px_args->mac_extensions_size)) != 0)
1986 goto bad;
1987
1988 size_t extsize = PS_MAC_EXTENSIONS_SIZE(psmx->psmx_count);
1989 if (extsize == 0 || extsize > px_args->mac_extensions_size) {
1990 error = EINVAL;
1991 goto bad;
1992 }
1993
1994 for (i = 0; i < psmx->psmx_count; i++) {
1995 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
1996 if (extension->datalen == 0 || extension->datalen > PAGE_SIZE) {
1997 error = EINVAL;
1998 goto bad;
1999 }
2000 }
2001
2002 for (copycnt = 0; copycnt < psmx->psmx_count; copycnt++) {
2003 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[copycnt];
2004 void *data = NULL;
2005
2006 MALLOC(data, void *, extension->datalen, M_TEMP, M_WAITOK);
2007 if ((error = copyin(extension->data, data, extension->datalen)) != 0) {
2008 FREE(data, M_TEMP);
2009 goto bad;
2010 }
2011 extension->datap = data;
2012 }
2013
2014 *psmxp = psmx;
2015 return 0;
2016
2017 bad:
2018 if (psmx != NULL) {
2019 for (i = 0; i < copycnt; i++)
2020 FREE(psmx->psmx_extensions[i].datap, M_TEMP);
2021 FREE(psmx, M_TEMP);
2022 }
2023 return error;
2024 }
2025
2026 static void
2027 spawn_free_macpolicyinfo(_posix_spawn_mac_policy_extensions_t psmx)
2028 {
2029 int i;
2030
2031 if (psmx == NULL)
2032 return;
2033 for (i = 0; i < psmx->psmx_count; i++)
2034 FREE(psmx->psmx_extensions[i].datap, M_TEMP);
2035 FREE(psmx, M_TEMP);
2036 }
2037 #endif /* CONFIG_MACF */
2038
2039 #if CONFIG_COALITIONS
2040 static inline void spawn_coalitions_release_all(coalition_t coal[COALITION_NUM_TYPES])
2041 {
2042 for (int c = 0; c < COALITION_NUM_TYPES; c++) {
2043 if (coal[c]) {
2044 coalition_remove_active(coal[c]);
2045 coalition_release(coal[c]);
2046 }
2047 }
2048 }
2049 #endif
2050
2051 #if CONFIG_PERSONAS
2052 static int spawn_validate_persona(struct _posix_spawn_persona_info *px_persona)
2053 {
2054 int error = 0;
2055 struct persona *persona = NULL;
2056 int verify = px_persona->pspi_flags & POSIX_SPAWN_PERSONA_FLAGS_VERIFY;
2057
2058 /*
2059 * TODO: rdar://problem/19981151
2060 * Add entitlement check!
2061 */
2062 if (!kauth_cred_issuser(kauth_cred_get()))
2063 return EPERM;
2064
2065 persona = persona_lookup(px_persona->pspi_id);
2066 if (!persona) {
2067 error = ESRCH;
2068 goto out;
2069 }
2070
2071 if (verify) {
2072 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) {
2073 if (px_persona->pspi_uid != persona_get_uid(persona)) {
2074 error = EINVAL;
2075 goto out;
2076 }
2077 }
2078 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) {
2079 if (px_persona->pspi_gid != persona_get_gid(persona)) {
2080 error = EINVAL;
2081 goto out;
2082 }
2083 }
2084 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
2085 int ngroups = 0;
2086 gid_t groups[NGROUPS_MAX];
2087
2088 if (persona_get_groups(persona, &ngroups, groups,
2089 px_persona->pspi_ngroups) != 0) {
2090 error = EINVAL;
2091 goto out;
2092 }
2093 if (ngroups != (int)px_persona->pspi_ngroups) {
2094 error = EINVAL;
2095 goto out;
2096 }
2097 while (ngroups--) {
2098 if (px_persona->pspi_groups[ngroups] != groups[ngroups]) {
2099 error = EINVAL;
2100 goto out;
2101 }
2102 }
2103 if (px_persona->pspi_gmuid != persona_get_gmuid(persona)) {
2104 error = EINVAL;
2105 goto out;
2106 }
2107 }
2108 }
2109
2110 out:
2111 if (persona)
2112 persona_put(persona);
2113
2114 return error;
2115 }
2116
/*
 * Adopt the given persona in the newly spawned process 'p'.
 *
 * Without POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE this delegates entirely to
 * persona_proc_adopt_id(), which adopts the persona's own credentials.
 * With the override flag set, the process still joins the persona, but
 * its credential is rebuilt from the caller-supplied UID / GID / group
 * list before adoption.
 *
 * Returns: 0 on success, ESRCH if the persona can't be found, EINVAL if
 * it has no base credential, or whatever persona_proc_adopt*() returns.
 *
 * NOTE(review): the kauth_cred_set* calls below appear to consume the
 * passed-in credential reference and hand back a (possibly different)
 * credential — preserve the exact assignment chain; verify against the
 * kauth credential KPI before restructuring.
 */
static int spawn_persona_adopt(proc_t p, struct _posix_spawn_persona_info *px_persona)
{
	int ret;
	kauth_cred_t cred;
	struct persona *persona = NULL;
	int override = !!(px_persona->pspi_flags & POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE);

	/* No override requested: adopt the persona and its credentials wholesale. */
	if (!override)
		return persona_proc_adopt_id(p, px_persona->pspi_id, NULL);

	/*
	 * we want to spawn into the given persona, but we want to override
	 * the kauth with a different UID/GID combo
	 */
	persona = persona_lookup(px_persona->pspi_id);
	if (!persona)
		return ESRCH;

	cred = persona_get_cred(persona);
	if (!cred) {
		/* persona has no base credential to build the override from */
		ret = EINVAL;
		goto out;
	}

	if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) {
		/* real, effective and saved UID all set to the requested uid */
		cred = kauth_cred_setresuid(cred,
					px_persona->pspi_uid,
					px_persona->pspi_uid,
					px_persona->pspi_uid,
					KAUTH_UID_NONE);
	}

	if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) {
		/* real, effective and saved GID all set to the requested gid */
		cred = kauth_cred_setresgid(cred,
					px_persona->pspi_gid,
					px_persona->pspi_gid,
					px_persona->pspi_gid);
	}

	if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
		/* install the caller-supplied supplementary group list + gmuid */
		cred = kauth_cred_setgroups(cred,
					px_persona->pspi_groups,
					px_persona->pspi_ngroups,
					px_persona->pspi_gmuid);
	}

	ret = persona_proc_adopt(p, persona, cred);

out:
	persona_put(persona);
	return ret;
}
2169 #endif
2170
2171 /*
2172 * posix_spawn
2173 *
2174 * Parameters: uap->pid Pointer to pid return area
2175 * uap->fname File name to exec
2176 * uap->argp Argument list
2177 * uap->envp Environment list
2178 *
2179 * Returns: 0 Success
2180 * EINVAL Invalid argument
2181 * ENOTSUP Not supported
2182 * ENOEXEC Executable file format error
2183 * exec_activate_image:EINVAL Invalid argument
2184 * exec_activate_image:EACCES Permission denied
2185 * exec_activate_image:EINTR Interrupted function
2186 * exec_activate_image:ENOMEM Not enough space
2187 * exec_activate_image:EFAULT Bad address
2188 * exec_activate_image:ENAMETOOLONG Filename too long
2189 * exec_activate_image:ENOEXEC Executable file format error
2190 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
2191 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
2192 * exec_activate_image:???
2193 * mac_execve_enter:???
2194 *
2195 * TODO: Expect to need __mac_posix_spawn() at some point...
2196 * Handle posix_spawnattr_t
2197 * Handle posix_spawn_file_actions_t
2198 */
2199 int
2200 posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval)
2201 {
2202 proc_t p = ap; /* quiet bogus GCC vfork() warning */
2203 user_addr_t pid = uap->pid;
2204 int ival[2]; /* dummy retval for setpgid() */
2205 char *bufp = NULL;
2206 struct image_params *imgp;
2207 struct vnode_attr *vap;
2208 struct vnode_attr *origvap;
2209 struct uthread *uthread = 0; /* compiler complains if not set to 0*/
2210 int error, sig;
2211 int is_64 = IS_64BIT_PROCESS(p);
2212 struct vfs_context context;
2213 struct user__posix_spawn_args_desc px_args;
2214 struct _posix_spawnattr px_sa;
2215 _posix_spawn_file_actions_t px_sfap = NULL;
2216 _posix_spawn_port_actions_t px_spap = NULL;
2217 struct __kern_sigaction vec;
2218 boolean_t spawn_no_exec = FALSE;
2219 boolean_t proc_transit_set = TRUE;
2220 boolean_t exec_done = FALSE;
2221 int portwatch_count = 0;
2222 ipc_port_t * portwatch_ports = NULL;
2223 vm_size_t px_sa_offset = offsetof(struct _posix_spawnattr, psa_ports);
2224 task_t new_task = NULL;
2225 boolean_t should_release_proc_ref = FALSE;
2226 void *inherit = NULL;
2227 #if CONFIG_PERSONAS
2228 struct _posix_spawn_persona_info *px_persona = NULL;
2229 #endif
2230
2231 /*
2232 * Allocate a big chunk for locals instead of using stack since these
2233 * structures are pretty big.
2234 */
2235 MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO);
2236 imgp = (struct image_params *) bufp;
2237 if (bufp == NULL) {
2238 error = ENOMEM;
2239 goto bad;
2240 }
2241 vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
2242 origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));
2243
2244 /* Initialize the common data in the image_params structure */
2245 imgp->ip_user_fname = uap->path;
2246 imgp->ip_user_argv = uap->argv;
2247 imgp->ip_user_envv = uap->envp;
2248 imgp->ip_vattr = vap;
2249 imgp->ip_origvattr = origvap;
2250 imgp->ip_vfs_context = &context;
2251 imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT : IMGPF_NONE);
2252 imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
2253 imgp->ip_mac_return = 0;
2254 imgp->ip_px_persona = NULL;
2255 imgp->ip_cs_error = OS_REASON_NULL;
2256
2257 if (uap->adesc != USER_ADDR_NULL) {
2258 if(is_64) {
2259 error = copyin(uap->adesc, &px_args, sizeof(px_args));
2260 } else {
2261 struct user32__posix_spawn_args_desc px_args32;
2262
2263 error = copyin(uap->adesc, &px_args32, sizeof(px_args32));
2264
2265 /*
2266 * Convert arguments descriptor from external 32 bit
2267 * representation to internal 64 bit representation
2268 */
2269 px_args.attr_size = px_args32.attr_size;
2270 px_args.attrp = CAST_USER_ADDR_T(px_args32.attrp);
2271 px_args.file_actions_size = px_args32.file_actions_size;
2272 px_args.file_actions = CAST_USER_ADDR_T(px_args32.file_actions);
2273 px_args.port_actions_size = px_args32.port_actions_size;
2274 px_args.port_actions = CAST_USER_ADDR_T(px_args32.port_actions);
2275 px_args.mac_extensions_size = px_args32.mac_extensions_size;
2276 px_args.mac_extensions = CAST_USER_ADDR_T(px_args32.mac_extensions);
2277 px_args.coal_info_size = px_args32.coal_info_size;
2278 px_args.coal_info = CAST_USER_ADDR_T(px_args32.coal_info);
2279 px_args.persona_info_size = px_args32.persona_info_size;
2280 px_args.persona_info = CAST_USER_ADDR_T(px_args32.persona_info);
2281 }
2282 if (error)
2283 goto bad;
2284
2285 if (px_args.attr_size != 0) {
2286 /*
2287 * We are not copying the port_actions pointer,
2288 * because we already have it from px_args.
2289 * This is a bit fragile: <rdar://problem/16427422>
2290 */
2291
2292 if ((error = copyin(px_args.attrp, &px_sa, px_sa_offset) != 0))
2293 goto bad;
2294
2295 bzero( (void *)( (unsigned long) &px_sa + px_sa_offset), sizeof(px_sa) - px_sa_offset );
2296
2297 imgp->ip_px_sa = &px_sa;
2298 }
2299 if (px_args.file_actions_size != 0) {
2300 /* Limit file_actions to allowed number of open files */
2301 int maxfa = (p->p_limit ? p->p_rlimit[RLIMIT_NOFILE].rlim_cur : NOFILE);
2302 size_t maxfa_size = PSF_ACTIONS_SIZE(maxfa);
2303 if (px_args.file_actions_size < PSF_ACTIONS_SIZE(1) ||
2304 maxfa_size == 0 || px_args.file_actions_size > maxfa_size) {
2305 error = EINVAL;
2306 goto bad;
2307 }
2308 MALLOC(px_sfap, _posix_spawn_file_actions_t, px_args.file_actions_size, M_TEMP, M_WAITOK);
2309 if (px_sfap == NULL) {
2310 error = ENOMEM;
2311 goto bad;
2312 }
2313 imgp->ip_px_sfa = px_sfap;
2314
2315 if ((error = copyin(px_args.file_actions, px_sfap,
2316 px_args.file_actions_size)) != 0)
2317 goto bad;
2318
2319 /* Verify that the action count matches the struct size */
2320 size_t psfsize = PSF_ACTIONS_SIZE(px_sfap->psfa_act_count);
2321 if (psfsize == 0 || psfsize != px_args.file_actions_size) {
2322 error = EINVAL;
2323 goto bad;
2324 }
2325 }
2326 if (px_args.port_actions_size != 0) {
2327 /* Limit port_actions to one page of data */
2328 if (px_args.port_actions_size < PS_PORT_ACTIONS_SIZE(1) ||
2329 px_args.port_actions_size > PAGE_SIZE) {
2330 error = EINVAL;
2331 goto bad;
2332 }
2333
2334 MALLOC(px_spap, _posix_spawn_port_actions_t,
2335 px_args.port_actions_size, M_TEMP, M_WAITOK);
2336 if (px_spap == NULL) {
2337 error = ENOMEM;
2338 goto bad;
2339 }
2340 imgp->ip_px_spa = px_spap;
2341
2342 if ((error = copyin(px_args.port_actions, px_spap,
2343 px_args.port_actions_size)) != 0)
2344 goto bad;
2345
2346 /* Verify that the action count matches the struct size */
2347 size_t pasize = PS_PORT_ACTIONS_SIZE(px_spap->pspa_count);
2348 if (pasize == 0 || pasize != px_args.port_actions_size) {
2349 error = EINVAL;
2350 goto bad;
2351 }
2352 }
2353 #if CONFIG_PERSONAS
2354 /* copy in the persona info */
2355 if (px_args.persona_info_size != 0 && px_args.persona_info != 0) {
2356 /* for now, we need the exact same struct in user space */
2357 if (px_args.persona_info_size != sizeof(*px_persona)) {
2358 error = ERANGE;
2359 goto bad;
2360 }
2361
2362 MALLOC(px_persona, struct _posix_spawn_persona_info *, px_args.persona_info_size, M_TEMP, M_WAITOK|M_ZERO);
2363 if (px_persona == NULL) {
2364 error = ENOMEM;
2365 goto bad;
2366 }
2367 imgp->ip_px_persona = px_persona;
2368
2369 if ((error = copyin(px_args.persona_info, px_persona,
2370 px_args.persona_info_size)) != 0)
2371 goto bad;
2372 if ((error = spawn_validate_persona(px_persona)) != 0)
2373 goto bad;
2374 }
2375 #endif
2376 #if CONFIG_MACF
2377 if (px_args.mac_extensions_size != 0) {
2378 if ((error = spawn_copyin_macpolicyinfo(&px_args, (_posix_spawn_mac_policy_extensions_t *)&imgp->ip_px_smpx)) != 0)
2379 goto bad;
2380 }
2381 #endif /* CONFIG_MACF */
2382 }
2383
2384 /* set uthread to parent */
2385 uthread = get_bsdthread_info(current_thread());
2386
2387 /*
2388 * <rdar://6640530>; this does not result in a behaviour change
2389 * relative to Leopard, so there should not be any existing code
2390 * which depends on it.
2391 */
2392 if (uthread->uu_flag & UT_VFORK) {
2393 error = EINVAL;
2394 goto bad;
2395 }
2396
2397 /*
2398 * If we don't have the extension flag that turns "posix_spawn()"
2399 * into "execve() with options", then we will be creating a new
2400 * process which does not inherit memory from the parent process,
2401 * which is one of the most expensive things about using fork()
2402 * and execve().
2403 */
2404 if (imgp->ip_px_sa == NULL || !(px_sa.psa_flags & POSIX_SPAWN_SETEXEC)){
2405
2406 /* Set the new task's coalition, if it is requested. */
2407 coalition_t coal[COALITION_NUM_TYPES] = { COALITION_NULL };
2408 #if CONFIG_COALITIONS
2409 int i, ncoals;
2410 kern_return_t kr = KERN_SUCCESS;
2411 struct _posix_spawn_coalition_info coal_info;
2412 int coal_role[COALITION_NUM_TYPES];
2413
2414 if (imgp->ip_px_sa == NULL || !px_args.coal_info)
2415 goto do_fork1;
2416
2417 memset(&coal_info, 0, sizeof(coal_info));
2418
2419 if (px_args.coal_info_size > sizeof(coal_info))
2420 px_args.coal_info_size = sizeof(coal_info);
2421 error = copyin(px_args.coal_info,
2422 &coal_info, px_args.coal_info_size);
2423 if (error != 0)
2424 goto bad;
2425
2426 ncoals = 0;
2427 for (i = 0; i < COALITION_NUM_TYPES; i++) {
2428 uint64_t cid = coal_info.psci_info[i].psci_id;
2429 if (cid != 0) {
2430 /*
2431 * don't allow tasks which are not in a
2432 * privileged coalition to spawn processes
2433 * into coalitions other than their own
2434 */
2435 if (!task_is_in_privileged_coalition(p->task, i)) {
2436 coal_dbg("ERROR: %d not in privilegd "
2437 "coalition of type %d",
2438 p->p_pid, i);
2439 spawn_coalitions_release_all(coal);
2440 error = EPERM;
2441 goto bad;
2442 }
2443
2444 coal_dbg("searching for coalition id:%llu", cid);
2445 /*
2446 * take a reference and activation on the
2447 * coalition to guard against free-while-spawn
2448 * races
2449 */
2450 coal[i] = coalition_find_and_activate_by_id(cid);
2451 if (coal[i] == COALITION_NULL) {
2452 coal_dbg("could not find coalition id:%llu "
2453 "(perhaps it has been terminated or reaped)", cid);
2454 /*
2455 * release any other coalition's we
2456 * may have a reference to
2457 */
2458 spawn_coalitions_release_all(coal);
2459 error = ESRCH;
2460 goto bad;
2461 }
2462 if (coalition_type(coal[i]) != i) {
2463 coal_dbg("coalition with id:%lld is not of type:%d"
2464 " (it's type:%d)", cid, i, coalition_type(coal[i]));
2465 error = ESRCH;
2466 goto bad;
2467 }
2468 coal_role[i] = coal_info.psci_info[i].psci_role;
2469 ncoals++;
2470 }
2471 }
2472 if (ncoals < COALITION_NUM_TYPES) {
2473 /*
2474 * If the user is attempting to spawn into a subset of
2475 * the known coalition types, then make sure they have
2476 * _at_least_ specified a resource coalition. If not,
2477 * the following fork1() call will implicitly force an
2478 * inheritance from 'p' and won't actually spawn the
2479 * new task into the coalitions the user specified.
2480 * (also the call to coalitions_set_roles will panic)
2481 */
2482 if (coal[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
2483 spawn_coalitions_release_all(coal);
2484 error = EINVAL;
2485 goto bad;
2486 }
2487 }
2488 do_fork1:
2489 #endif /* CONFIG_COALITIONS */
2490
2491 /*
2492 * note that this will implicitly inherit the
2493 * caller's persona (if it exists)
2494 */
2495 error = fork1(p, &imgp->ip_new_thread, PROC_CREATE_SPAWN, coal);
2496 /* returns a thread and task reference */
2497
2498 if (error == 0) {
2499 new_task = get_threadtask(imgp->ip_new_thread);
2500 }
2501 #if CONFIG_COALITIONS
2502 /* set the roles of this task within each given coalition */
2503 if (error == 0) {
2504 kr = coalitions_set_roles(coal, new_task, coal_role);
2505 if (kr != KERN_SUCCESS)
2506 error = EINVAL;
2507 if (kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_COALITION,
2508 MACH_COALITION_ADOPT))) {
2509 for (i = 0; i < COALITION_NUM_TYPES; i++) {
2510 if (coal[i] != COALITION_NULL) {
2511 /*
2512 * On 32-bit targets, uniqueid
2513 * will get truncated to 32 bits
2514 */
2515 KDBG_RELEASE(MACHDBG_CODE(
2516 DBG_MACH_COALITION,
2517 MACH_COALITION_ADOPT),
2518 coalition_id(coal[i]),
2519 get_task_uniqueid(new_task));
2520 }
2521 }
2522 }
2523 }
2524
2525 /* drop our references and activations - fork1() now holds them */
2526 spawn_coalitions_release_all(coal);
2527 #endif /* CONFIG_COALITIONS */
2528 if (error != 0) {
2529 goto bad;
2530 }
2531 imgp->ip_flags |= IMGPF_SPAWN; /* spawn w/o exec */
2532 spawn_no_exec = TRUE; /* used in later tests */
2533
2534 #if CONFIG_PERSONAS
2535 /*
2536 * If the parent isn't in a persona (launchd), and
2537 * hasn't specified a new persona for the process,
2538 * then we'll put the process into the system persona
2539 *
2540 * TODO: this will have to be re-worked because as of
2541 * now, without any launchd adoption, the resulting
2542 * xpcproxy process will not have sufficient
2543 * privileges to setuid/gid.
2544 */
2545 #if 0
2546 if (!proc_has_persona(p) && imgp->ip_px_persona == NULL) {
2547 MALLOC(px_persona, struct _posix_spawn_persona_info *,
2548 sizeof(*px_persona), M_TEMP, M_WAITOK|M_ZERO);
2549 if (px_persona == NULL) {
2550 error = ENOMEM;
2551 goto bad;
2552 }
2553 px_persona->pspi_id = persona_get_id(g_system_persona);
2554 imgp->ip_px_persona = px_persona;
2555 }
2556 #endif /* 0 */
2557 #endif /* CONFIG_PERSONAS */
2558 } else {
2559 /*
2560 * For execve case, create a new task and thread
2561 * which points to current_proc. The current_proc will point
2562 * to the new task after image activation and proc ref drain.
2563 *
2564 * proc (current_proc) <----- old_task (current_task)
2565 * ^ | ^
2566 * | | |
2567 * | ----------------------------------
2568 * |
2569 * --------- new_task (task marked as TF_EXEC_COPY)
2570 *
2571 * After image activation, the proc will point to the new task
2572 * and would look like following.
2573 *
2574 * proc (current_proc) <----- old_task (current_task, marked as TPF_DID_EXEC)
2575 * ^ |
2576 * | |
2577 * | ----------> new_task
2578 * | |
2579 * -----------------
2580 *
2581 * During exec any transition from new_task -> proc is fine, but don't allow
2582 * transition from proc->task, since it will modify old_task.
2583 */
2584 imgp->ip_new_thread = fork_create_child(current_task(),
2585 NULL, p, FALSE, p->p_flag & P_LP64, TRUE);
2586 /* task and thread ref returned by fork_create_child */
2587 if (imgp->ip_new_thread == NULL) {
2588 error = ENOMEM;
2589 goto bad;
2590 }
2591
2592 new_task = get_threadtask(imgp->ip_new_thread);
2593 imgp->ip_flags |= IMGPF_EXEC;
2594 }
2595
2596 if (spawn_no_exec) {
2597 p = (proc_t)get_bsdthreadtask_info(imgp->ip_new_thread);
2598
2599 /*
2600 * We had to wait until this point before firing the
2601 * proc:::create probe, otherwise p would not point to the
2602 * child process.
2603 */
2604 DTRACE_PROC1(create, proc_t, p);
2605 }
2606 assert(p != NULL);
2607
2608 context.vc_thread = imgp->ip_new_thread;
2609 context.vc_ucred = p->p_ucred; /* XXX must NOT be kauth_cred_get() */
2610
2611 /*
2612 * Post fdcopy(), pre exec_handle_sugid() - this is where we want
2613 * to handle the file_actions. Since vfork() also ends up setting
2614 * us into the parent process group, and saved off the signal flags,
2615 * this is also where we want to handle the spawn flags.
2616 */
2617
2618 /* Has spawn file actions? */
2619 if (imgp->ip_px_sfa != NULL) {
2620 /*
2621 * The POSIX_SPAWN_CLOEXEC_DEFAULT flag
2622 * is handled in exec_handle_file_actions().
2623 */
2624 if ((error = exec_handle_file_actions(imgp,
2625 imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0)) != 0)
2626 goto bad;
2627 }
2628
2629 /* Has spawn port actions? */
2630 if (imgp->ip_px_spa != NULL) {
2631 boolean_t is_adaptive = FALSE;
2632 boolean_t portwatch_present = FALSE;
2633
2634 /* Will this process become adaptive? The apptype isn't ready yet, so we can't look there. */
2635 if (imgp->ip_px_sa != NULL && px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE)
2636 is_adaptive = TRUE;
2637
2638 /*
2639 * portwatch only:
2640 * Allocate a place to store the ports we want to bind to the new task
2641 * We can't bind them until after the apptype is set.
2642 */
2643 if (px_spap->pspa_count != 0 && is_adaptive) {
2644 portwatch_count = px_spap->pspa_count;
2645 MALLOC(portwatch_ports, ipc_port_t *, (sizeof(ipc_port_t) * portwatch_count), M_TEMP, M_WAITOK | M_ZERO);
2646 } else {
2647 portwatch_ports = NULL;
2648 }
2649
2650 if ((error = exec_handle_port_actions(imgp, &portwatch_present, portwatch_ports)) != 0)
2651 goto bad;
2652
2653 if (portwatch_present == FALSE && portwatch_ports != NULL) {
2654 FREE(portwatch_ports, M_TEMP);
2655 portwatch_ports = NULL;
2656 portwatch_count = 0;
2657 }
2658 }
2659
2660 /* Has spawn attr? */
2661 if (imgp->ip_px_sa != NULL) {
2662 /*
2663 * Set the process group ID of the child process; this has
2664 * to happen before the image activation.
2665 */
2666 if (px_sa.psa_flags & POSIX_SPAWN_SETPGROUP) {
2667 struct setpgid_args spga;
2668 spga.pid = p->p_pid;
2669 spga.pgid = px_sa.psa_pgroup;
2670 /*
2671 * Effectively, call setpgid() system call; works
2672 * because there are no pointer arguments.
2673 */
2674 if((error = setpgid(p, &spga, ival)) != 0)
2675 goto bad;
2676 }
2677
2678 /*
2679 * Reset UID/GID to parent's RUID/RGID; This works only
2680 * because the operation occurs *after* the vfork() and
2681 * before the call to exec_handle_sugid() by the image
2682 * activator called from exec_activate_image(). POSIX
2683 * requires that any setuid/setgid bits on the process
2684 * image will take precedence over the spawn attributes
2685 * (re)setting them.
2686 *
2687 * Modifications to p_ucred must be guarded using the
2688 * proc's ucred lock. This prevents others from accessing
2689 * a garbage credential.
2690 */
2691 while (px_sa.psa_flags & POSIX_SPAWN_RESETIDS) {
2692 kauth_cred_t my_cred = kauth_cred_proc_ref(p);
2693 kauth_cred_t my_new_cred = kauth_cred_setuidgid(my_cred, kauth_cred_getruid(my_cred), kauth_cred_getrgid(my_cred));
2694
2695 if (my_cred == my_new_cred) {
2696 kauth_cred_unref(&my_cred);
2697 break;
2698 }
2699
2700 /* update cred on proc */
2701 proc_ucred_lock(p);
2702
2703 if (p->p_ucred != my_cred) {
2704 proc_ucred_unlock(p);
2705 kauth_cred_unref(&my_new_cred);
2706 continue;
2707 }
2708
2709 /* donate cred reference on my_new_cred to p->p_ucred */
2710 p->p_ucred = my_new_cred;
2711 PROC_UPDATE_CREDS_ONPROC(p);
2712 proc_ucred_unlock(p);
2713
2714 /* drop additional reference that was taken on the previous cred */
2715 kauth_cred_unref(&my_cred);
2716 }
2717
2718 #if CONFIG_PERSONAS
2719 if (spawn_no_exec && imgp->ip_px_persona != NULL) {
2720 /*
2721 * If we were asked to spawn a process into a new persona,
2722 * do the credential switch now (which may override the UID/GID
2723 * inherit done just above). It's important to do this switch
2724 * before image activation both for reasons stated above, and
2725 * to ensure that the new persona has access to the image/file
2726 * being executed.
2727 */
2728 error = spawn_persona_adopt(p, imgp->ip_px_persona);
2729 if (error != 0)
2730 goto bad;
2731 }
2732 #endif /* CONFIG_PERSONAS */
2733 #if !SECURE_KERNEL
2734 /*
2735 * Disable ASLR for the spawned process.
2736 *
2737 * But only do so if we are not embedded + RELEASE.
2738 * While embedded allows for a boot-arg (-disable_aslr)
2739 * to deal with this (which itself is only honored on
2740 * DEVELOPMENT or DEBUG builds of xnu), it is often
2741 * useful or necessary to disable ASLR on a per-process
2742 * basis for unit testing and debugging.
2743 */
2744 if (px_sa.psa_flags & _POSIX_SPAWN_DISABLE_ASLR)
2745 OSBitOrAtomic(P_DISABLE_ASLR, &p->p_flag);
2746 #endif /* !SECURE_KERNEL */
2747
2748 /* Randomize high bits of ASLR slide */
2749 if (px_sa.psa_flags & _POSIX_SPAWN_HIGH_BITS_ASLR)
2750 imgp->ip_flags |= IMGPF_HIGH_BITS_ASLR;
2751
2752 /*
2753 * Forcibly disallow execution from data pages for the spawned process
2754 * even if it would otherwise be permitted by the architecture default.
2755 */
2756 if (px_sa.psa_flags & _POSIX_SPAWN_ALLOW_DATA_EXEC)
2757 imgp->ip_flags |= IMGPF_ALLOW_DATA_EXEC;
2758 }
2759
2760 /*
2761 * Disable ASLR during image activation. This occurs either if the
2762 * _POSIX_SPAWN_DISABLE_ASLR attribute was found above or if
2763 * P_DISABLE_ASLR was inherited from the parent process.
2764 */
2765 if (p->p_flag & P_DISABLE_ASLR)
2766 imgp->ip_flags |= IMGPF_DISABLE_ASLR;
2767
2768 /*
2769 * Clear transition flag so we won't hang if exec_activate_image() causes
2770 * an automount (and launchd does a proc sysctl to service it).
2771 *
2772 * <rdar://problem/6848672>, <rdar://problem/5959568>.
2773 */
2774 if (spawn_no_exec) {
2775 proc_transend(p, 0);
2776 proc_transit_set = 0;
2777 }
2778
2779 #if MAC_SPAWN /* XXX */
2780 if (uap->mac_p != USER_ADDR_NULL) {
2781 error = mac_execve_enter(uap->mac_p, imgp);
2782 if (error)
2783 goto bad;
2784 }
2785 #endif
2786
2787 /*
2788 * Activate the image
2789 */
2790 error = exec_activate_image(imgp);
2791
2792 if (error == 0 && !spawn_no_exec) {
2793 p = proc_exec_switch_task(p, current_task(), new_task, imgp->ip_new_thread);
2794 /* proc ref returned */
2795 should_release_proc_ref = TRUE;
2796 }
2797
2798 if (error == 0) {
2799 /* process completed the exec */
2800 exec_done = TRUE;
2801 } else if (error == -1) {
2802 /* Image not claimed by any activator? */
2803 error = ENOEXEC;
2804 }
2805
2806 /*
2807 * If we have a spawn attr, and it contains signal related flags,
2808 * the we need to process them in the "context" of the new child
2809 * process, so we have to process it following image activation,
2810 * prior to making the thread runnable in user space. This is
2811 * necessitated by some signal information being per-thread rather
2812 * than per-process, and we don't have the new allocation in hand
2813 * until after the image is activated.
2814 */
2815 if (!error && imgp->ip_px_sa != NULL) {
2816 thread_t child_thread = imgp->ip_new_thread;
2817 uthread_t child_uthread = get_bsdthread_info(child_thread);
2818
2819 /*
2820 * Mask a list of signals, instead of them being unmasked, if
2821 * they were unmasked in the parent; note that some signals
2822 * are not maskable.
2823 */
2824 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGMASK)
2825 child_uthread->uu_sigmask = (px_sa.psa_sigmask & ~sigcantmask);
2826 /*
2827 * Default a list of signals instead of ignoring them, if
2828 * they were ignored in the parent. Note that we pass
2829 * spawn_no_exec to setsigvec() to indicate that we called
2830 * fork1() and therefore do not need to call proc_signalstart()
2831 * internally.
2832 */
2833 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGDEF) {
2834 vec.sa_handler = SIG_DFL;
2835 vec.sa_tramp = 0;
2836 vec.sa_mask = 0;
2837 vec.sa_flags = 0;
2838 for (sig = 1; sig < NSIG; sig++)
2839 if (px_sa.psa_sigdefault & (1 << (sig-1))) {
2840 error = setsigvec(p, child_thread, sig, &vec, spawn_no_exec);
2841 }
2842 }
2843
2844 /*
2845 * Activate the CPU usage monitor, if requested. This is done via a task-wide, per-thread CPU
2846 * usage limit, which will generate a resource exceeded exception if any one thread exceeds the
2847 * limit.
2848 *
2849 * Userland gives us interval in seconds, and the kernel SPI expects nanoseconds.
2850 */
2851 if (px_sa.psa_cpumonitor_percent != 0) {
2852 /*
2853 * Always treat a CPU monitor activation coming from spawn as entitled. Requiring
2854 * an entitlement to configure the monitor a certain way seems silly, since
2855 * whomever is turning it on could just as easily choose not to do so.
2856 */
2857 error = proc_set_task_ruse_cpu(p->task,
2858 TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
2859 px_sa.psa_cpumonitor_percent,
2860 px_sa.psa_cpumonitor_interval * NSEC_PER_SEC,
2861 0, TRUE);
2862 }
2863 }
2864
2865 bad:
2866
2867 if (error == 0) {
2868 /* reset delay idle sleep status if set */
2869 #if !CONFIG_EMBEDDED
2870 if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP)
2871 OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &p->p_flag);
2872 #endif /* !CONFIG_EMBEDDED */
2873 /* upon successful spawn, re/set the proc control state */
2874 if (imgp->ip_px_sa != NULL) {
2875 switch (px_sa.psa_pcontrol) {
2876 case POSIX_SPAWN_PCONTROL_THROTTLE:
2877 p->p_pcaction = P_PCTHROTTLE;
2878 break;
2879 case POSIX_SPAWN_PCONTROL_SUSPEND:
2880 p->p_pcaction = P_PCSUSP;
2881 break;
2882 case POSIX_SPAWN_PCONTROL_KILL:
2883 p->p_pcaction = P_PCKILL;
2884 break;
2885 case POSIX_SPAWN_PCONTROL_NONE:
2886 default:
2887 p->p_pcaction = 0;
2888 break;
2889 };
2890 }
2891 exec_resettextvp(p, imgp);
2892
2893 #if CONFIG_MEMORYSTATUS
2894 /* Has jetsam attributes? */
2895 if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_SET)) {
2896 /*
2897 * With 2-level high-water-mark support, POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is no
2898 * longer relevant, as background limits are described via the inactive limit slots.
2899 *
2900 * That said, however, if the POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is passed in,
2901 * we attempt to mimic previous behavior by forcing the BG limit data into the
2902 * inactive/non-fatal mode and force the active slots to hold system_wide/fatal mode.
2903 */
2904 if (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND) {
2905 memorystatus_update(p, px_sa.psa_priority, 0,
2906 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY),
2907 TRUE,
2908 -1, TRUE,
2909 px_sa.psa_memlimit_inactive, FALSE);
2910 } else {
2911 memorystatus_update(p, px_sa.psa_priority, 0,
2912 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY),
2913 TRUE,
2914 px_sa.psa_memlimit_active,
2915 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL),
2916 px_sa.psa_memlimit_inactive,
2917 (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL));
2918 }
2919
2920 }
2921 #endif /* CONFIG_MEMORYSTATUS */
2922 }
2923
2924 /*
2925 * If we successfully called fork1(), we always need to do this;
2926 * we identify this case by noting the IMGPF_SPAWN flag. This is
2927 * because we come back from that call with signals blocked in the
2928 * child, and we have to unblock them, but we want to wait until
2929 * after we've performed any spawn actions. This has to happen
2930 * before check_for_signature(), which uses psignal.
2931 */
2932 if (spawn_no_exec) {
2933 if (proc_transit_set)
2934 proc_transend(p, 0);
2935
2936 /*
2937 * Drop the signal lock on the child which was taken on our
2938 * behalf by forkproc()/cloneproc() to prevent signals being
2939 * received by the child in a partially constructed state.
2940 */
2941 proc_signalend(p, 0);
2942
2943 /* flag the 'fork' has occurred */
2944 proc_knote(p->p_pptr, NOTE_FORK | p->p_pid);
2945 }
2946
2947 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
2948 if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0))
2949 proc_knote(p, NOTE_EXEC);
2950
2951
2952 if (error == 0) {
2953 /*
2954 * We need to initialize the bank context behind the protection of
2955 * the proc_trans lock to prevent a race with exit. We can't do this during
2956 * exec_activate_image because task_bank_init checks entitlements that
2957 * aren't loaded until subsequent calls (including exec_resettextvp).
2958 */
2959 error = proc_transstart(p, 0, 0);
2960
2961 if (error == 0) {
2962 task_bank_init(get_threadtask(imgp->ip_new_thread));
2963 proc_transend(p, 0);
2964 }
2965 }
2966
2967 /* Inherit task role from old task to new task for exec */
2968 if (error == 0 && !spawn_no_exec) {
2969 proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task());
2970 }
2971
2972 /*
2973 * Apply the spawnattr policy, apptype (which primes the task for importance donation),
2974 * and bind any portwatch ports to the new task.
2975 * This must be done after the exec so that the child's thread is ready,
2976 * and after the in transit state has been released, because priority is
2977 * dropped here so we need to be prepared for a potentially long preemption interval
2978 *
2979 * TODO: Consider splitting this up into separate phases
2980 */
2981 if (error == 0 && imgp->ip_px_sa != NULL) {
2982 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
2983
2984 exec_handle_spawnattr_policy(p, psa->psa_apptype, psa->psa_qos_clamp, psa->psa_darwin_role,
2985 portwatch_ports, portwatch_count);
2986 }
2987
2988 /*
2989 * Need to transfer pending watch port boosts to the new task while still making
2990 * sure that the old task remains in the importance linkage. Create an importance
2991 * linkage from old task to new task, then switch the task importance base
2992 * of old task and new task. After the switch the port watch boost will be
2993 * boosting the new task and new task will be donating importance to old task.
2994 */
2995 if (error == 0 && task_did_exec(current_task())) {
2996 inherit = ipc_importance_exec_switch_task(current_task(), get_threadtask(imgp->ip_new_thread));
2997 }
2998
2999 if (error == 0) {
3000 /* Apply the main thread qos */
3001 thread_t main_thread = imgp->ip_new_thread;
3002 task_set_main_thread_qos(get_threadtask(imgp->ip_new_thread), main_thread);
3003
3004 #if CONFIG_MACF
3005 /*
3006 * Processes with the MAP_JIT entitlement are permitted to have
3007 * a jumbo-size map.
3008 */
3009 if (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0) {
3010 vm_map_set_jumbo(get_task_map(p->task));
3011 }
3012 #endif /* CONFIG_MACF */
3013 }
3014
3015 /*
3016 * Release any ports we kept around for binding to the new task
3017 * We need to release the rights even if the posix_spawn has failed.
3018 */
3019 if (portwatch_ports != NULL) {
3020 for (int i = 0; i < portwatch_count; i++) {
3021 ipc_port_t port = NULL;
3022 if ((port = portwatch_ports[i]) != NULL) {
3023 ipc_port_release_send(port);
3024 }
3025 }
3026 FREE(portwatch_ports, M_TEMP);
3027 portwatch_ports = NULL;
3028 portwatch_count = 0;
3029 }
3030
3031 /*
3032 * We have to delay operations which might throw a signal until after
3033 * the signals have been unblocked; however, we want that to happen
3034 * after exec_resettextvp() so that the textvp is correct when they
3035 * fire.
3036 */
3037 if (error == 0) {
3038 error = check_for_signature(p, imgp);
3039
3040 /*
3041 * Pay for our earlier safety; deliver the delayed signals from
3042 * the incomplete spawn process now that it's complete.
3043 */
3044 if (imgp != NULL && spawn_no_exec && (p->p_lflag & P_LTRACED)) {
3045 psignal_vfork(p, p->task, imgp->ip_new_thread, SIGTRAP);
3046 }
3047
3048 if (error == 0 && !spawn_no_exec)
3049 KDBG(BSDDBG_CODE(DBG_BSD_PROC,BSD_PROC_EXEC),
3050 p->p_pid);
3051 }
3052
3053
3054 if (imgp != NULL) {
3055 if (imgp->ip_vp)
3056 vnode_put(imgp->ip_vp);
3057 if (imgp->ip_scriptvp)
3058 vnode_put(imgp->ip_scriptvp);
3059 if (imgp->ip_strings)
3060 execargs_free(imgp);
3061 if (imgp->ip_px_sfa != NULL)
3062 FREE(imgp->ip_px_sfa, M_TEMP);
3063 if (imgp->ip_px_spa != NULL)
3064 FREE(imgp->ip_px_spa, M_TEMP);
3065 #if CONFIG_PERSONAS
3066 if (imgp->ip_px_persona != NULL)
3067 FREE(imgp->ip_px_persona, M_TEMP);
3068 #endif
3069 #if CONFIG_MACF
3070 if (imgp->ip_px_smpx != NULL)
3071 spawn_free_macpolicyinfo(imgp->ip_px_smpx);
3072 if (imgp->ip_execlabelp)
3073 mac_cred_label_free(imgp->ip_execlabelp);
3074 if (imgp->ip_scriptlabelp)
3075 mac_vnode_label_free(imgp->ip_scriptlabelp);
3076 if (imgp->ip_cs_error != OS_REASON_NULL) {
3077 os_reason_free(imgp->ip_cs_error);
3078 imgp->ip_cs_error = OS_REASON_NULL;
3079 }
3080 #endif
3081 }
3082
3083 #if CONFIG_DTRACE
3084 if (spawn_no_exec) {
3085 /*
3086 * In the original DTrace reference implementation,
3087 * posix_spawn() was a libc routine that just
3088 * did vfork(2) then exec(2). Thus the proc::: probes
3089 * are very fork/exec oriented. The details of this
3090 * in-kernel implementation of posix_spawn() is different
3091 * (while producing the same process-observable effects)
3092 * particularly w.r.t. errors, and which thread/process
3093 * is constructing what on behalf of whom.
3094 */
3095 if (error) {
3096 DTRACE_PROC1(spawn__failure, int, error);
3097 } else {
3098 DTRACE_PROC(spawn__success);
3099 /*
3100 * Some DTrace scripts, e.g. newproc.d in
3101 * /usr/bin, rely on the the 'exec-success'
3102 * probe being fired in the child after the
3103 * new process image has been constructed
3104 * in order to determine the associated pid.
3105 *
3106 * So, even though the parent built the image
3107 * here, for compatibility, mark the new thread
3108 * so 'exec-success' fires on it as it leaves
3109 * the kernel.
3110 */
3111 dtrace_thread_didexec(imgp->ip_new_thread);
3112 }
3113 } else {
3114 if (error) {
3115 DTRACE_PROC1(exec__failure, int, error);
3116 } else {
3117 dtrace_thread_didexec(imgp->ip_new_thread);
3118 }
3119 }
3120
3121 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
3122 (*dtrace_proc_waitfor_hook)(p);
3123 }
3124 #endif
3125 /*
3126 * clear bsd_info from old task if it did exec.
3127 */
3128 if (task_did_exec(current_task())) {
3129 set_bsdtask_info(current_task(), NULL);
3130 }
3131
3132 /* clear bsd_info from new task and terminate it if exec failed */
3133 if (new_task != NULL && task_is_exec_copy(new_task)) {
3134 set_bsdtask_info(new_task, NULL);
3135 task_terminate_internal(new_task);
3136 }
3137
3138 /* Return to both the parent and the child? */
3139 if (imgp != NULL && spawn_no_exec) {
3140 /*
3141 * If the parent wants the pid, copy it out
3142 */
3143 if (pid != USER_ADDR_NULL)
3144 (void)suword(pid, p->p_pid);
3145 retval[0] = error;
3146
3147 /*
3148 * If we had an error, perform an internal reap ; this is
3149 * entirely safe, as we have a real process backing us.
3150 */
3151 if (error) {
3152 proc_list_lock();
3153 p->p_listflag |= P_LIST_DEADPARENT;
3154 proc_list_unlock();
3155 proc_lock(p);
3156 /* make sure no one else has killed it off... */
3157 if (p->p_stat != SZOMB && p->exit_thread == NULL) {
3158 p->exit_thread = current_thread();
3159 proc_unlock(p);
3160 exit1(p, 1, (int *)NULL);
3161 } else {
3162 /* someone is doing it for us; just skip it */
3163 proc_unlock(p);
3164 }
3165 }
3166 }
3167
3168 /*
3169 * Do not terminate the current task, if proc_exec_switch_task did not
3170 * switch the tasks, terminating the current task without the switch would
3171 * result in loosing the SIGKILL status.
3172 */
3173 if (task_did_exec(current_task())) {
3174 /* Terminate the current task, since exec will start in new task */
3175 task_terminate_internal(current_task());
3176 }
3177
3178 /* Release the thread ref returned by fork_create_child/fork1 */
3179 if (imgp != NULL && imgp->ip_new_thread) {
3180 /* wake up the new thread */
3181 task_clear_return_wait(get_threadtask(imgp->ip_new_thread));
3182 thread_deallocate(imgp->ip_new_thread);
3183 imgp->ip_new_thread = NULL;
3184 }
3185
3186 /* Release the ref returned by fork_create_child/fork1 */
3187 if (new_task) {
3188 task_deallocate(new_task);
3189 new_task = NULL;
3190 }
3191
3192 if (should_release_proc_ref) {
3193 proc_rele(p);
3194 }
3195
3196 if (bufp != NULL) {
3197 FREE(bufp, M_TEMP);
3198 }
3199
3200 if (inherit != NULL) {
3201 ipc_importance_release(inherit);
3202 }
3203
3204 return(error);
3205 }
3206
3207 /*
3208 * proc_exec_switch_task
3209 *
3210 * Parameters: p proc
3211 * old_task task before exec
3212 * new_task task after exec
3213 * new_thread thread in new task
3214 *
3215 * Returns: proc.
3216 *
3217 * Note: The function will switch the task pointer of proc
3218 * from old task to new task. The switch needs to happen
3219 * after draining all proc refs and inside a proc translock.
3220 * In the case of failure to switch the task, which might happen
3221 * if the process received a SIGKILL or jetsam killed it, it will make
3222 * sure that the new task terminates. User proc ref returned
3223 * to caller.
3224 *
3225 * This function is called after the point of no return; in the case of
3226 * failure to switch, it will terminate the new task and swallow the
3227 * error and let the terminated process complete exec and die.
3228 */
proc_t
proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread)
{
	int error = 0;
	boolean_t task_active;
	boolean_t proc_active;
	boolean_t thread_active;
	thread_t old_thread = current_thread();

	/*
	 * Switch the task pointer of proc to new task.
	 * Before switching the task, wait for proc_refdrain.
	 * After the switch happens, the proc can disappear,
	 * take a ref before it disappears. Waiting for
	 * proc_refdrain in exec will block all other threads
	 * trying to take a proc ref, boost the current thread
	 * to avoid priority inversion.
	 */
	thread_set_exec_promotion(old_thread);
	p = proc_refdrain_with_refwait(p, TRUE);
	/* extra proc ref returned to the caller */

	assert(get_threadtask(new_thread) == new_task);

	/* The exec-copy task may already have been killed while we drained refs */
	task_active = task_is_active(new_task);

	/* Take the proc_translock to change the task ptr */
	proc_lock(p);
	/* proc may already be exiting (e.g. SIGKILL landed past the point of no return) */
	proc_active = !(p->p_lflag & P_LEXIT);

	/* Check if the current thread is not aborted due to SIGKILL */
	thread_active = thread_is_active(old_thread);

	/*
	 * Do not switch the task if the new task or proc is already terminated
	 * as a result of error in exec past point of no return
	 */
	if (proc_active && task_active && thread_active) {
		/* proc_lock is held across this transstart (second argument) */
		error = proc_transstart(p, 1, 0);
		if (error == 0) {
			uthread_t new_uthread = get_bsdthread_info(new_thread);
			uthread_t old_uthread = get_bsdthread_info(current_thread());

			/*
			 * bsd_info of old_task will get cleared in execve and posix_spawn
			 * after firing exec-success/error dtrace probe.
			 */
			p->task = new_task;

			/* Clear dispatchqueue and workloop ast offset */
			p->p_dispatchqueue_offset = 0;
			p->p_dispatchqueue_serialno_offset = 0;
			p->p_return_to_kernel_offset = 0;

			/* Copy the signal state, dtrace state and set bsd ast on new thread */
			act_set_astbsd(new_thread);
			new_uthread->uu_siglist = old_uthread->uu_siglist;
			new_uthread->uu_sigwait = old_uthread->uu_sigwait;
			new_uthread->uu_sigmask = old_uthread->uu_sigmask;
			new_uthread->uu_oldmask = old_uthread->uu_oldmask;
			new_uthread->uu_vforkmask = old_uthread->uu_vforkmask;
			new_uthread->uu_exit_reason = old_uthread->uu_exit_reason;
#if CONFIG_DTRACE
			new_uthread->t_dtrace_sig = old_uthread->t_dtrace_sig;
			new_uthread->t_dtrace_stop = old_uthread->t_dtrace_stop;
			new_uthread->t_dtrace_resumepid = old_uthread->t_dtrace_resumepid;
			assert(new_uthread->t_dtrace_scratch == NULL);
			/* hand the per-thread dtrace scratch buffer over to the new thread */
			new_uthread->t_dtrace_scratch = old_uthread->t_dtrace_scratch;

			old_uthread->t_dtrace_sig = 0;
			old_uthread->t_dtrace_stop = 0;
			old_uthread->t_dtrace_resumepid = 0;
			old_uthread->t_dtrace_scratch = NULL;
#endif
			/* Copy the resource accounting info */
			thread_copy_resource_info(new_thread, current_thread());

			/* Clear the exit reason and signal state on old thread */
			old_uthread->uu_exit_reason = NULL;
			old_uthread->uu_siglist = 0;

			/* Add the new uthread to proc uthlist and remove the old one */
			TAILQ_INSERT_TAIL(&p->p_uthlist, new_uthread, uu_list);
			TAILQ_REMOVE(&p->p_uthlist, old_uthread, uu_list);

			/*
			 * Mark old_task as having exec'ed (caller will terminate it)
			 * and promote new_task from exec-copy to the real task.
			 */
			task_set_did_exec_flag(old_task);
			task_clear_exec_copy_flag(new_task);

			task_copy_fields_for_exec(new_task, old_task);

			proc_transend(p, 1);
		}
	}

	proc_unlock(p);
	proc_refwake(p);
	thread_clear_exec_promotion(old_thread);

	/*
	 * Switch did not happen (transstart failed, or proc/task/thread was
	 * already dying): terminate the exec-copy task and swallow the error,
	 * per the contract documented above.
	 */
	if (error != 0 || !task_active || !proc_active || !thread_active) {
		task_terminate_internal(new_task);
	}

	return p;
}
3332
3333 /*
3334 * execve
3335 *
3336 * Parameters: uap->fname File name to exec
3337 * uap->argp Argument list
3338 * uap->envp Environment list
3339 *
3340 * Returns: 0 Success
3341 * __mac_execve:EINVAL Invalid argument
3342 * __mac_execve:ENOTSUP Invalid argument
3343 * __mac_execve:EACCES Permission denied
3344 * __mac_execve:EINTR Interrupted function
3345 * __mac_execve:ENOMEM Not enough space
3346 * __mac_execve:EFAULT Bad address
3347 * __mac_execve:ENAMETOOLONG Filename too long
3348 * __mac_execve:ENOEXEC Executable file format error
3349 * __mac_execve:ETXTBSY Text file busy [misuse of error code]
3350 * __mac_execve:???
3351 *
3352 * TODO: Dynamic linker header address on stack is copied via suword()
3353 */
3354 /* ARGSUSED */
3355 int
3356 execve(proc_t p, struct execve_args *uap, int32_t *retval)
3357 {
3358 struct __mac_execve_args muap;
3359 int err;
3360
3361 memoryshot(VM_EXECVE, DBG_FUNC_NONE);
3362
3363 muap.fname = uap->fname;
3364 muap.argp = uap->argp;
3365 muap.envp = uap->envp;
3366 muap.mac_p = USER_ADDR_NULL;
3367 err = __mac_execve(p, &muap, retval);
3368
3369 return(err);
3370 }
3371
3372 /*
3373 * __mac_execve
3374 *
3375 * Parameters: uap->fname File name to exec
3376 * uap->argp Argument list
3377 * uap->envp Environment list
3378 * uap->mac_p MAC label supplied by caller
3379 *
3380 * Returns: 0 Success
3381 * EINVAL Invalid argument
3382 * ENOTSUP Not supported
3383 * ENOEXEC Executable file format error
3384 * exec_activate_image:EINVAL Invalid argument
3385 * exec_activate_image:EACCES Permission denied
3386 * exec_activate_image:EINTR Interrupted function
3387 * exec_activate_image:ENOMEM Not enough space
3388 * exec_activate_image:EFAULT Bad address
3389 * exec_activate_image:ENAMETOOLONG Filename too long
3390 * exec_activate_image:ENOEXEC Executable file format error
3391 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
3392 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
3393 * exec_activate_image:???
3394 * mac_execve_enter:???
3395 *
3396 * TODO: Dynamic linker header address on stack is copied via suword()
3397 */
int
__mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval)
{
	char *bufp = NULL;
	struct image_params *imgp;
	struct vnode_attr *vap;
	struct vnode_attr *origvap;
	int error;
	int is_64 = IS_64BIT_PROCESS(p);
	struct vfs_context context;
	struct uthread *uthread;
	task_t new_task = NULL;
	boolean_t should_release_proc_ref = FALSE;
	boolean_t exec_done = FALSE;
	boolean_t in_vfexec = FALSE;
	void *inherit = NULL;

	context.vc_thread = current_thread();
	context.vc_ucred = kauth_cred_proc_ref(p);	/* XXX must NOT be kauth_cred_get() */

	/* Allocate a big chunk for locals instead of using stack since these
	 * structures are pretty big.
	 */
	MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO);
	/* imgp aliases the start of bufp; the NULL check below guards both */
	imgp = (struct image_params *) bufp;
	if (bufp == NULL) {
		/*
		 * NOTE(review): this goto reaches exit_with_error without dropping
		 * the vc_ucred reference taken at entry — possible ref leak; confirm.
		 */
		error = ENOMEM;
		goto exit_with_error;
	}
	/* vap and origvap are carved out of the same single allocation */
	vap = (struct vnode_attr *) (bufp + sizeof(*imgp));
	origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap));

	/* Initialize the common data in the image_params structure */
	imgp->ip_user_fname = uap->fname;
	imgp->ip_user_argv = uap->argp;
	imgp->ip_user_envv = uap->envp;
	imgp->ip_vattr = vap;
	imgp->ip_origvattr = origvap;
	imgp->ip_vfs_context = &context;
	imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT : IMGPF_NONE) | ((p->p_flag & P_DISABLE_ASLR) ? IMGPF_DISABLE_ASLR : IMGPF_NONE);
	imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
	imgp->ip_mac_return = 0;
	imgp->ip_cs_error = OS_REASON_NULL;

#if CONFIG_MACF
	/* Pull in the caller-supplied MAC label, if any */
	if (uap->mac_p != USER_ADDR_NULL) {
		error = mac_execve_enter(uap->mac_p, imgp);
		if (error) {
			kauth_cred_unref(&context.vc_ucred);
			goto exit_with_error;
		}
	}
#endif
	uthread = get_bsdthread_info(current_thread());
	if (uthread->uu_flag & UT_VFORK) {
		/* vfork()ed child exec: reuse the task created by vfork */
		imgp->ip_flags |= IMGPF_VFORK_EXEC;
		in_vfexec = TRUE;
	} else {
		imgp->ip_flags |= IMGPF_EXEC;

		/*
		 * For execve case, create a new task and thread
		 * which points to current_proc. The current_proc will point
		 * to the new task after image activation and proc ref drain.
		 *
		 * proc (current_proc) <-----  old_task (current_task)
		 *  ^ |                                    ^
		 *  | |                                    |
		 *  | ----------------------------------
		 *  |
		 *  --------- new_task (task marked as TF_EXEC_COPY)
		 *
		 * After image activation, the proc will point to the new task
		 * and would look like following.
		 *
		 * proc (current_proc)  <-----  old_task (current_task, marked as TPF_DID_EXEC)
		 *  ^ |
		 *  | |
		 *  | ----------> new_task
		 *  |          |
		 *  -----------------
		 *
		 * During exec any transition from new_task -> proc is fine, but don't allow
		 * transition from proc->task, since it will modify old_task.
		 */
		imgp->ip_new_thread = fork_create_child(current_task(),
					NULL, p, FALSE, p->p_flag & P_LP64, TRUE);
		/* task and thread ref returned by fork_create_child */
		if (imgp->ip_new_thread == NULL) {
			/* NOTE(review): vc_ucred reference is not dropped on this path either — confirm */
			error = ENOMEM;
			goto exit_with_error;
		}

		new_task = get_threadtask(imgp->ip_new_thread);
		context.vc_thread = imgp->ip_new_thread;
	}

	error = exec_activate_image(imgp);
	/* thread and task ref returned for vfexec case */

	if (imgp->ip_new_thread != NULL) {
		/*
		 * task reference might be returned by exec_activate_image
		 * for vfexec.
		 */
		new_task = get_threadtask(imgp->ip_new_thread);
	}

	if (!error && !in_vfexec) {
		p = proc_exec_switch_task(p, current_task(), new_task, imgp->ip_new_thread);
		/* proc ref returned */
		should_release_proc_ref = TRUE;
	}

	kauth_cred_unref(&context.vc_ucred);

	/* Image not claimed by any activator? */
	if (error == -1)
		error = ENOEXEC;

	if (!error) {
		exec_done = TRUE;
		assert(imgp->ip_new_thread != NULL);

		exec_resettextvp(p, imgp);
		error = check_for_signature(p, imgp);
	}

	/* flag exec has occurred, notify only if it has not failed due to FP Key error */
	if (exec_done && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0))
		proc_knote(p, NOTE_EXEC);

	/* Release image-activation resources regardless of success */
	if (imgp->ip_vp != NULLVP)
		vnode_put(imgp->ip_vp);
	if (imgp->ip_scriptvp != NULLVP)
		vnode_put(imgp->ip_scriptvp);
	if (imgp->ip_strings)
		execargs_free(imgp);
#if CONFIG_MACF
	if (imgp->ip_execlabelp)
		mac_cred_label_free(imgp->ip_execlabelp);
	if (imgp->ip_scriptlabelp)
		mac_vnode_label_free(imgp->ip_scriptlabelp);
#endif
	if (imgp->ip_cs_error != OS_REASON_NULL) {
		os_reason_free(imgp->ip_cs_error);
		imgp->ip_cs_error = OS_REASON_NULL;
	}

	if (!error) {
		/*
		 * We need to initialize the bank context behind the protection of
		 * the proc_trans lock to prevent a race with exit. We can't do this during
		 * exec_activate_image because task_bank_init checks entitlements that
		 * aren't loaded until subsequent calls (including exec_resettextvp).
		 */
		error = proc_transstart(p, 0, 0);
	}

	if (!error) {
		task_bank_init(get_threadtask(imgp->ip_new_thread));
		proc_transend(p, 0);

		/* Sever any extant thread affinity */
		thread_affinity_exec(current_thread());

		/* Inherit task role from old task to new task for exec */
		if (!in_vfexec) {
			proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task());
		}

		thread_t main_thread = imgp->ip_new_thread;

		task_set_main_thread_qos(new_task, main_thread);

#if CONFIG_MACF
		/*
		 * Processes with the MAP_JIT entitlement are permitted to have
		 * a jumbo-size map.
		 */
		if (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0) {
			vm_map_set_jumbo(get_task_map(new_task));
		}
#endif /* CONFIG_MACF */


#if CONFIG_DTRACE
		/* mark the new thread so 'exec-success' fires as it leaves the kernel */
		dtrace_thread_didexec(imgp->ip_new_thread);

		if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL)
			(*dtrace_proc_waitfor_hook)(p);
#endif

		if (in_vfexec) {
			/* unblock the vfork()ing parent now that the image is in place */
			vfork_return(p, retval, p->p_pid);
		}
	} else {
		DTRACE_PROC1(exec__failure, int, error);
	}

exit_with_error:
	/* Common unwind: reached on success and on every failure path above */

	/*
	 * clear bsd_info from old task if it did exec.
	 */
	if (task_did_exec(current_task())) {
		set_bsdtask_info(current_task(), NULL);
	}

	/* clear bsd_info from new task and terminate it if exec failed */
	if (new_task != NULL && task_is_exec_copy(new_task)) {
		set_bsdtask_info(new_task, NULL);
		task_terminate_internal(new_task);
	}

	/*
	 * Need to transfer pending watch port boosts to the new task while still making
	 * sure that the old task remains in the importance linkage. Create an importance
	 * linkage from old task to new task, then switch the task importance base
	 * of old task and new task. After the switch the port watch boost will be
	 * boosting the new task and new task will be donating importance to old task.
	 */
	if (error == 0 && task_did_exec(current_task())) {
		inherit = ipc_importance_exec_switch_task(current_task(), get_threadtask(imgp->ip_new_thread));
	}

	if (imgp != NULL) {
		/*
		 * Do not terminate the current task, if proc_exec_switch_task did not
		 * switch the tasks, terminating the current task without the switch would
		 * result in losing the SIGKILL status.
		 */
		if (task_did_exec(current_task())) {
			/* Terminate the current task, since exec will start in new task */
			task_terminate_internal(current_task());
		}

		/* Release the thread ref returned by fork_create_child */
		if (imgp->ip_new_thread) {
			/* wake up the new exec thread */
			task_clear_return_wait(get_threadtask(imgp->ip_new_thread));
			thread_deallocate(imgp->ip_new_thread);
			imgp->ip_new_thread = NULL;
		}
	}

	/* Release the ref returned by fork_create_child */
	if (new_task) {
		task_deallocate(new_task);
		new_task = NULL;
	}

	if (should_release_proc_ref) {
		proc_rele(p);
	}

	if (bufp != NULL) {
		FREE(bufp, M_TEMP);
	}

	if (inherit != NULL) {
		ipc_importance_release(inherit);
	}

	return(error);
}
3664
3665
3666 /*
3667 * copyinptr
3668 *
3669 * Description: Copy a pointer in from user space to a user_addr_t in kernel
3670 * space, based on 32/64 bitness of the user space
3671 *
3672 * Parameters: froma User space address
3673 * toptr Address of kernel space user_addr_t
3674 * ptr_size 4/8, based on 'froma' address space
3675 *
3676 * Returns: 0 Success
3677 * EFAULT Bad 'froma'
3678 *
3679 * Implicit returns:
3680 * *ptr_size Modified
3681 */
3682 static int
3683 copyinptr(user_addr_t froma, user_addr_t *toptr, int ptr_size)
3684 {
3685 int error;
3686
3687 if (ptr_size == 4) {
3688 /* 64 bit value containing 32 bit address */
3689 unsigned int i;
3690
3691 error = copyin(froma, &i, 4);
3692 *toptr = CAST_USER_ADDR_T(i); /* SAFE */
3693 } else {
3694 error = copyin(froma, toptr, 8);
3695 }
3696 return (error);
3697 }
3698
3699
3700 /*
3701 * copyoutptr
3702 *
3703 * Description: Copy a pointer out from a user_addr_t in kernel space to
3704 * user space, based on 32/64 bitness of the user space
3705 *
3706 * Parameters: ua User space address to copy to
3707 * ptr Address of kernel space user_addr_t
3708 * ptr_size 4/8, based on 'ua' address space
3709 *
3710 * Returns: 0 Success
3711 * EFAULT Bad 'ua'
3712 *
3713 */
3714 static int
3715 copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size)
3716 {
3717 int error;
3718
3719 if (ptr_size == 4) {
3720 /* 64 bit value containing 32 bit address */
3721 unsigned int i = CAST_DOWN_EXPLICIT(unsigned int,ua); /* SAFE */
3722
3723 error = copyout(&i, ptr, 4);
3724 } else {
3725 error = copyout(&ua, ptr, 8);
3726 }
3727 return (error);
3728 }
3729
3730
3731 /*
3732 * exec_copyout_strings
3733 *
3734 * Copy out the strings segment to user space. The strings segment is put
3735 * on a preinitialized stack frame.
3736 *
3737 * Parameters: struct image_params * the image parameter block
3738 * int * a pointer to the stack offset variable
3739 *
3740 * Returns: 0 Success
3741 * !0 Failure: errno
3742 *
3743 * Implicit returns:
3744 * (*stackp) The stack offset, modified
3745 *
3746 * Note: The strings segment layout is backward, from the beginning
3747 * of the top of the stack to consume the minimal amount of
3748 * space possible; the returned stack pointer points to the
3749 * end of the area consumed (stacks grow downward).
3750 *
3751 * argc is an int; arg[i] are pointers; env[i] are pointers;
3752 * the 0's are (void *)NULL's
3753 *
3754 * The stack frame layout is:
3755 *
3756 * +-------------+ <- p->user_stack
3757 * | 16b |
3758 * +-------------+
3759 * | STRING AREA |
3760 * | : |
3761 * | : |
3762 * | : |
3763 * +- -- -- -- --+
3764 * | PATH AREA |
3765 * +-------------+
3766 * | 0 |
3767 * +-------------+
3768 * | applev[n] |
3769 * +-------------+
3770 * :
3771 * :
3772 * +-------------+
3773 * | applev[1] |
3774 * +-------------+
3775 * | exec_path / |
3776 * | applev[0] |
3777 * +-------------+
3778 * | 0 |
3779 * +-------------+
3780 * | env[n] |
3781 * +-------------+
3782 * :
3783 * :
3784 * +-------------+
3785 * | env[0] |
3786 * +-------------+
3787 * | 0 |
3788 * +-------------+
3789 * | arg[argc-1] |
3790 * +-------------+
3791 * :
3792 * :
3793 * +-------------+
3794 * | arg[0] |
3795 * +-------------+
3796 * | argc |
3797 * sp-> +-------------+
3798 *
3799 * Although technically a part of the STRING AREA, we treat the PATH AREA as
3800 * a separate entity. This allows us to align the beginning of the PATH AREA
3801 * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers
3802 * which precede it on the stack are properly aligned.
3803 */
3804
static int
exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp)
{
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4;
	int ptr_area_size;
	void *ptr_buffer_start, *ptr_buffer;
	int string_size;

	user_addr_t string_area; /* *argv[], *env[] */
	user_addr_t ptr_area; /* argv[], env[], applev[] */
	user_addr_t argc_area; /* argc */
	user_addr_t stack;
	int error;

	unsigned i;
	struct copyout_desc {
		char *start_string;
		int count;
#if CONFIG_DTRACE
		user_addr_t *dtrace_cookie;
#endif
		boolean_t null_term;
	} descriptors[] = {
		/*
		 * Emitted in this order: argv pointers, env pointers, the saved
		 * exec_path (as applev[0], no NULL after it), then the remaining
		 * apple vector — matching the stack diagram above.
		 */
		{
			.start_string = imgp->ip_startargv,
			.count = imgp->ip_argc,
#if CONFIG_DTRACE
			.dtrace_cookie = &p->p_dtrace_argv,
#endif
			.null_term = TRUE
		},
		{
			.start_string = imgp->ip_endargv,
			.count = imgp->ip_envc,
#if CONFIG_DTRACE
			.dtrace_cookie = &p->p_dtrace_envp,
#endif
			.null_term = TRUE
		},
		{
			.start_string = imgp->ip_strings,
			.count = 1,
#if CONFIG_DTRACE
			.dtrace_cookie = NULL,
#endif
			.null_term = FALSE
		},
		{
			.start_string = imgp->ip_endenvv,
			.count = imgp->ip_applec - 1, /* exec_path handled above */
#if CONFIG_DTRACE
			.dtrace_cookie = NULL,
#endif
			.null_term = TRUE
		}
	};

	stack = *stackp;

	/*
	 * All previous contributors to the string area
	 * should have aligned their sub-area
	 */
	if (imgp->ip_strspace % ptr_size != 0) {
		error = EINVAL;
		goto bad;
	}

	/* Grow the stack down for the strings we've been building up */
	string_size = imgp->ip_strendp - imgp->ip_strings;
	stack -= string_size;
	string_area = stack;

	/*
	 * Need room for one pointer for each string, plus
	 * one for the NULLs terminating the argv, envv, and apple areas.
	 */
	ptr_area_size = (imgp->ip_argc + imgp->ip_envc + imgp->ip_applec + 3) *
	    ptr_size;
	stack -= ptr_area_size;
	ptr_area = stack;

	/* We'll construct all the pointer arrays in our string buffer,
	 * which we already know is aligned properly, and ip_argspace
	 * was used to verify we have enough space.
	 */
	ptr_buffer_start = ptr_buffer = (void *)imgp->ip_strendp;

	/*
	 * Need room for pointer-aligned argc slot.
	 */
	stack -= ptr_size;
	argc_area = stack;

	/*
	 * Record the size of the arguments area so that sysctl_procargs()
	 * can return the argument area without having to parse the arguments.
	 */
	proc_lock(p);
	p->p_argc = imgp->ip_argc;
	p->p_argslen = (int)(*stackp - string_area);
	proc_unlock(p);

	/* Return the initial stack address: the location of argc */
	*stackp = stack;

	/*
	 * Copy out the entire strings area.
	 */
	error = copyout(imgp->ip_strings, string_area,
						   string_size);
	if (error)
		goto bad;

	for (i = 0; i < sizeof(descriptors)/sizeof(descriptors[0]); i++) {
		char *cur_string = descriptors[i].start_string;
		int j;

#if CONFIG_DTRACE
		if (descriptors[i].dtrace_cookie) {
			proc_lock(p);
			/* user address where this segment's pointer array will land */
			*descriptors[i].dtrace_cookie = ptr_area + ((uintptr_t)ptr_buffer - (uintptr_t)ptr_buffer_start); /* dtrace convenience */
			proc_unlock(p);
		}
#endif /* CONFIG_DTRACE */

		/*
		 * For each segment (argv, envv, applev), copy as many pointers as requested
		 * to our pointer buffer.
		 */
		for (j = 0; j < descriptors[i].count; j++) {
			/* translate kernel string offset to its user-space address */
			user_addr_t cur_address = string_area + (cur_string - imgp->ip_strings);

			/* Copy out the pointer to the current string. Alignment has been verified */
			if (ptr_size == 8) {
				*(uint64_t *)ptr_buffer = (uint64_t)cur_address;
			} else {
				*(uint32_t *)ptr_buffer = (uint32_t)cur_address;
			}

			ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
			cur_string += strlen(cur_string) + 1; /* Only a NUL between strings in the same area */
		}

		if (descriptors[i].null_term) {
			/* terminating NULL pointer for this segment's array */
			if (ptr_size == 8) {
				*(uint64_t *)ptr_buffer = 0ULL;
			} else {
				*(uint32_t *)ptr_buffer = 0;
			}

			ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
		}
	}

	/*
	 * Copy out all our pointer arrays in bulk.
	 */
	error = copyout(ptr_buffer_start, ptr_area,
					ptr_area_size);
	if (error)
		goto bad;

	/* argc (int32, stored in a ptr_size area) */
	error = copyoutptr((user_addr_t)imgp->ip_argc, argc_area, ptr_size);
	if (error)
		goto bad;

bad:
	/* success falls through to here as well; error holds the result */
	return(error);
}
3977
3978
3979 /*
3980 * exec_extract_strings
3981 *
3982 * Copy arguments and environment from user space into work area; we may
3983 * have already copied some early arguments into the work area, and if
3984 * so, any arguments copied in are appended to those already there.
3985 * This function is the primary manipulator of ip_argspace, since
3986 * these are the arguments the client of execve(2) knows about. After
3987 * each argv[]/envv[] string is copied, we charge the string length
3988 * and argv[]/envv[] pointer slot to ip_argspace, so that we can
3989 * full preflight the arg list size.
3990 *
3991 * Parameters: struct image_params * the image parameter block
3992 *
3993 * Returns: 0 Success
3994 * !0 Failure: errno
3995 *
3996 * Implicit returns;
3997 * (imgp->ip_argc) Count of arguments, updated
3998 * (imgp->ip_envc) Count of environment strings, updated
3999 * (imgp->ip_argspace) Count of remaining of NCARGS
4000 * (imgp->ip_interp_buffer) Interpreter and args (mutated in place)
4001 *
4002 *
4003 * Note: The argument and environment vectors are user space pointers
4004 * to arrays of user space pointers.
4005 */
static int
exec_extract_strings(struct image_params *imgp)
{
	int error = 0;
	/* Pointer width of the *calling* process (source of argv/envv arrays) */
	int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT) ? 8 : 4;
	/* Pointer width of the *new* image (what we charge per argv/envv slot) */
	int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4;
	user_addr_t argv = imgp->ip_user_argv;
	user_addr_t envv = imgp->ip_user_envv;

	/*
	 * Adjust space reserved for the path name by however much padding it
	 * needs. Doing this here since we didn't know if this would be a 32-
	 * or 64-bit process back in exec_save_path.
	 */
	while (imgp->ip_strspace % new_ptr_size != 0) {
		*imgp->ip_strendp++ = '\0';
		imgp->ip_strspace--;
		/* imgp->ip_argspace--; not counted towards exec args total */
	}

	/*
	 * From now on, we start attributing string space to ip_argspace
	 */
	imgp->ip_startargv = imgp->ip_strendp;
	imgp->ip_argc = 0;

	if((imgp->ip_flags & IMGPF_INTERPRET) != 0) {
		user_addr_t arg;
		char *argstart, *ch;

		/* First, the arguments in the "#!" string are tokenized and extracted. */
		argstart = imgp->ip_interp_buffer;
		while (argstart) {
			/* Scan to the end of the current whitespace-delimited token */
			ch = argstart;
			while (*ch && !IS_WHITESPACE(*ch)) {
				ch++;
			}

			if (*ch == '\0') {
				/* last argument, no need to NUL-terminate */
				error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);
				argstart = NULL;
			} else {
				/* NUL-terminate (mutates ip_interp_buffer in place) */
				*ch = '\0';
				error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);

				/*
				 * Find the next string. We know spaces at the end of the string have already
				 * been stripped.
				 */
				argstart = ch + 1;
				while (IS_WHITESPACE(*argstart)) {
					argstart++;
				}
			}

			/* Error-check, regardless of whether this is the last interpreter arg or not */
			if (error)
				goto bad;
			if (imgp->ip_argspace < new_ptr_size) {
				error = E2BIG;
				goto bad;
			}
			imgp->ip_argspace -= new_ptr_size;	/* to hold argv[] entry */
			imgp->ip_argc++;
		}

		if (argv != 0LL) {
			/*
			 * If we are running an interpreter, replace the av[0] that was
			 * passed to execve() with the path name that was
			 * passed to execve() for interpreters which do not use the PATH
			 * to locate their script arguments.
			 */
			error = copyinptr(argv, &arg, ptr_size);
			if (error)
				goto bad;
			if (arg != 0LL) {
				argv += ptr_size;	/* consume without using */
			}
		}

		if (imgp->ip_interp_sugid_fd != -1) {
			/*
			 * Setuid script: pass a /dev/fd path naming the already-open
			 * script fd instead of the (re-lookup-able, racy) path name.
			 */
			char temp[19]; /* "/dev/fd/" + 10 digits + NUL */
			snprintf(temp, sizeof(temp), "/dev/fd/%d", imgp->ip_interp_sugid_fd);
			error = exec_add_user_string(imgp, CAST_USER_ADDR_T(temp), UIO_SYSSPACE, TRUE);
		} else {
			error = exec_add_user_string(imgp, imgp->ip_user_fname, imgp->ip_seg, TRUE);
		}

		if (error)
			goto bad;
		if (imgp->ip_argspace < new_ptr_size) {
			error = E2BIG;
			goto bad;
		}
		imgp->ip_argspace -= new_ptr_size;	/* to hold argv[] entry */
		imgp->ip_argc++;
	}

	/* Copy in the remaining user argv[] strings, NULL-pointer terminated */
	while (argv != 0LL) {
		user_addr_t arg;

		error = copyinptr(argv, &arg, ptr_size);
		if (error)
			goto bad;

		if (arg == 0LL) {
			break;
		}

		argv += ptr_size;

		/*
		 * av[n...] = arg[n]
		 */
		error = exec_add_user_string(imgp, arg, imgp->ip_seg, TRUE);
		if (error)
			goto bad;
		if (imgp->ip_argspace < new_ptr_size) {
			error = E2BIG;
			goto bad;
		}
		imgp->ip_argspace -= new_ptr_size;	/* to hold argv[] entry */
		imgp->ip_argc++;
	}

	/* Save space for argv[] NULL terminator */
	if (imgp->ip_argspace < new_ptr_size) {
		error = E2BIG;
		goto bad;
	}
	imgp->ip_argspace -= new_ptr_size;

	/* Note where the args ends and env begins. */
	imgp->ip_endargv = imgp->ip_strendp;
	imgp->ip_envc = 0;

	/* Now, get the environment */
	while (envv != 0LL) {
		user_addr_t env;

		error = copyinptr(envv, &env, ptr_size);
		if (error)
			goto bad;

		envv += ptr_size;
		if (env == 0LL) {
			break;
		}
		/*
		 * av[n...] = env[n]
		 */
		error = exec_add_user_string(imgp, env, imgp->ip_seg, TRUE);
		if (error)
			goto bad;
		if (imgp->ip_argspace < new_ptr_size) {
			error = E2BIG;
			goto bad;
		}
		imgp->ip_argspace -= new_ptr_size;	/* to hold envv[] entry */
		imgp->ip_envc++;
	}

	/* Save space for envv[] NULL terminator */
	if (imgp->ip_argspace < new_ptr_size) {
		error = E2BIG;
		goto bad;
	}
	imgp->ip_argspace -= new_ptr_size;

	/* Align the tail of the combined argv+envv area */
	while (imgp->ip_strspace % new_ptr_size != 0) {
		/* padding here *is* charged against the exec args total */
		if (imgp->ip_argspace < 1) {
			error = E2BIG;
			goto bad;
		}
		*imgp->ip_strendp++ = '\0';
		imgp->ip_strspace--;
		imgp->ip_argspace--;
	}

	/* Note where the envv ends and applev begins. */
	imgp->ip_endenvv = imgp->ip_strendp;

	/*
	 * From now on, we are no longer charging argument
	 * space to ip_argspace.
	 */

bad:
	return error;
}
4200
/*
 * Libc has an 8-element array set up for stack guard values. It only fills
 * in one of those entries, and both gcc and llvm seem to use only a single
 * 8-byte guard. Until somebody needs more than an 8-byte guard value, don't
 * do the work to construct them.
 */
#define GUARD_VALUES 1
#define GUARD_KEY "stack_guard="

/*
 * System malloc needs some entropy when it is initialized.
 */
#define ENTROPY_VALUES 2
#define ENTROPY_KEY "malloc_entropy="

/*
 * System malloc engages nanozone for UIAPP.
 */
#define NANO_ENGAGE_KEY "MallocNanoZone=1"

/* Location of the commpage text, passed to userland via the apple[] vector */
#define PFZ_KEY "pfz="
extern user32_addr_t commpage_text32_location;
extern user64_addr_t commpage_text64_location;

#define MAIN_STACK_VALUES 4
#define MAIN_STACK_KEY "main_stack="

/* fsid/fileid identification of the executable and of dyld */
#define FSID_KEY "executable_file="
#define DYLD_FSID_KEY "dyld_file="
#define CDHASH_KEY "executable_cdhash="

/* Widest possible "0x<fsid>,0x<fsobjid>" string; used only for sizing buffers */
#define FSID_MAX_STRING "0x1234567890abcdef,0x1234567890abcdef"

#define HEX_STR_LEN 18 // 64-bit hex value "0x0123456701234567"
4235
4236 static int
4237 exec_add_entropy_key(struct image_params *imgp,
4238 const char *key,
4239 int values,
4240 boolean_t embedNUL)
4241 {
4242 const int limit = 8;
4243 uint64_t entropy[limit];
4244 char str[strlen(key) + (HEX_STR_LEN + 1) * limit + 1];
4245 if (values > limit) {
4246 values = limit;
4247 }
4248
4249 read_random(entropy, sizeof(entropy[0]) * values);
4250
4251 if (embedNUL) {
4252 entropy[0] &= ~(0xffull << 8);
4253 }
4254
4255 int len = snprintf(str, sizeof(str), "%s0x%llx", key, entropy[0]);
4256 int remaining = sizeof(str) - len;
4257 for (int i = 1; i < values && remaining > 0; ++i) {
4258 int start = sizeof(str) - remaining;
4259 len = snprintf(&str[start], remaining, ",0x%llx", entropy[i]);
4260 remaining -= len;
4261 }
4262
4263 return exec_add_user_string(imgp, CAST_USER_ADDR_T(str), UIO_SYSSPACE, FALSE);
4264 }
4265
4266 /*
4267 * Build up the contents of the apple[] string vector
4268 */
4269 static int
4270 exec_add_apple_strings(struct image_params *imgp,
4271 const load_result_t *load_result)
4272 {
4273 int error;
4274 int img_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4;
4275
4276 /* exec_save_path stored the first string */
4277 imgp->ip_applec = 1;
4278
4279 /* adding the pfz string */
4280 {
4281 char pfz_string[strlen(PFZ_KEY) + HEX_STR_LEN + 1];
4282
4283 if (img_ptr_size == 8) {
4284 snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%llx", commpage_text64_location);
4285 } else {
4286 snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%x", commpage_text32_location);
4287 }
4288 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(pfz_string), UIO_SYSSPACE, FALSE);
4289 if (error) {
4290 goto bad;
4291 }
4292 imgp->ip_applec++;
4293 }
4294
4295 /* adding the NANO_ENGAGE_KEY key */
4296 if (imgp->ip_px_sa) {
4297 int proc_flags = (((struct _posix_spawnattr *) imgp->ip_px_sa)->psa_flags);
4298
4299 if ((proc_flags & _POSIX_SPAWN_NANO_ALLOCATOR) == _POSIX_SPAWN_NANO_ALLOCATOR) {
4300 const char *nano_string = NANO_ENGAGE_KEY;
4301 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(nano_string), UIO_SYSSPACE, FALSE);
4302 if (error){
4303 goto bad;
4304 }
4305 imgp->ip_applec++;
4306 }
4307 }
4308
4309 /*
4310 * Supply libc with a collection of random values to use when
4311 * implementing -fstack-protector.
4312 *
4313 * (The first random string always contains an embedded NUL so that
4314 * __stack_chk_guard also protects against C string vulnerabilities)
4315 */
4316 error = exec_add_entropy_key(imgp, GUARD_KEY, GUARD_VALUES, TRUE);
4317 if (error) {
4318 goto bad;
4319 }
4320 imgp->ip_applec++;
4321
4322 /*
4323 * Supply libc with entropy for system malloc.
4324 */
4325 error = exec_add_entropy_key(imgp, ENTROPY_KEY, ENTROPY_VALUES, FALSE);
4326 if (error) {
4327 goto bad;
4328 }
4329 imgp->ip_applec++;
4330
4331 /*
4332 * Add MAIN_STACK_KEY: Supplies the address and size of the main thread's
4333 * stack if it was allocated by the kernel.
4334 *
4335 * The guard page is not included in this stack size as libpthread
4336 * expects to add it back in after receiving this value.
4337 */
4338 if (load_result->unixproc) {
4339 char stack_string[strlen(MAIN_STACK_KEY) + (HEX_STR_LEN + 1) * MAIN_STACK_VALUES + 1];
4340 snprintf(stack_string, sizeof(stack_string),
4341 MAIN_STACK_KEY "0x%llx,0x%llx,0x%llx,0x%llx",
4342 (uint64_t)load_result->user_stack,
4343 (uint64_t)load_result->user_stack_size,
4344 (uint64_t)load_result->user_stack_alloc,
4345 (uint64_t)load_result->user_stack_alloc_size);
4346 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(stack_string), UIO_SYSSPACE, FALSE);
4347 if (error) {
4348 goto bad;
4349 }
4350 imgp->ip_applec++;
4351 }
4352
4353 if (imgp->ip_vattr) {
4354 uint64_t fsid = get_va_fsid(imgp->ip_vattr);
4355 uint64_t fsobjid = imgp->ip_vattr->va_fileid;
4356
4357 char fsid_string[strlen(FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
4358 snprintf(fsid_string, sizeof(fsid_string),
4359 FSID_KEY "0x%llx,0x%llx", fsid, fsobjid);
4360 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
4361 if (error) {
4362 goto bad;
4363 }
4364 imgp->ip_applec++;
4365 }
4366
4367 if (imgp->ip_dyld_fsid || imgp->ip_dyld_fsobjid ) {
4368 char fsid_string[strlen(DYLD_FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
4369 snprintf(fsid_string, sizeof(fsid_string),
4370 DYLD_FSID_KEY "0x%llx,0x%llx", imgp->ip_dyld_fsid, imgp->ip_dyld_fsobjid);
4371 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
4372 if (error) {
4373 goto bad;
4374 }
4375 imgp->ip_applec++;
4376 }
4377
4378 uint8_t cdhash[SHA1_RESULTLEN];
4379 int cdhash_errror = ubc_cs_getcdhash(imgp->ip_vp, imgp->ip_arch_offset, cdhash);
4380 if (cdhash_errror == 0) {
4381 char hash_string[strlen(CDHASH_KEY) + 2*SHA1_RESULTLEN + 1];
4382 strncpy(hash_string, CDHASH_KEY, sizeof(hash_string));
4383 char *p = hash_string + sizeof(CDHASH_KEY) - 1;
4384 for (int i = 0; i < SHA1_RESULTLEN; i++) {
4385 snprintf(p, 3, "%02x", (int) cdhash[i]);
4386 p += 2;
4387 }
4388 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(hash_string), UIO_SYSSPACE, FALSE);
4389 if (error) {
4390 goto bad;
4391 }
4392 imgp->ip_applec++;
4393 }
4394
4395 /* Align the tail of the combined applev area */
4396 while (imgp->ip_strspace % img_ptr_size != 0) {
4397 *imgp->ip_strendp++ = '\0';
4398 imgp->ip_strspace--;
4399 }
4400
4401 bad:
4402 return error;
4403 }
4404
/* Current (soft) stack size limit for a process, from its resource limits */
#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur)
4406
4407 /*
4408 * exec_check_permissions
4409 *
4410 * Description: Verify that the file that is being attempted to be executed
4411 * is in fact allowed to be executed based on it POSIX file
4412 * permissions and other access control criteria
4413 *
4414 * Parameters: struct image_params * the image parameter block
4415 *
4416 * Returns: 0 Success
4417 * EACCES Permission denied
4418 * ENOEXEC Executable file format error
4419 * ETXTBSY Text file busy [misuse of error code]
4420 * vnode_getattr:???
4421 * vnode_authorize:???
4422 */
static int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->ip_vp;
	struct vnode_attr *vap = imgp->ip_vattr;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	int error;
	kauth_action_t action;

	/* Only allow execution of regular files */
	if (!vnode_isreg(vp))
		return (EACCES);

	/* Get the file attributes that we will be using here and elsewhere */
	VATTR_INIT(vap);
	VATTR_WANTED(vap, va_uid);
	VATTR_WANTED(vap, va_gid);
	VATTR_WANTED(vap, va_mode);
	VATTR_WANTED(vap, va_fsid);
	VATTR_WANTED(vap, va_fsid64);
	VATTR_WANTED(vap, va_fileid);
	VATTR_WANTED(vap, va_data_size);
	if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0)
		return (error);

	/*
	 * Ensure that at least one execute bit is on - otherwise root
	 * will always succeed, and we don't want that to happen unless the
	 * file really is executable.
	 */
	if (!vfs_authopaque(vnode_mount(vp)) && ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0))
		return (EACCES);

	/* Disallow zero length files */
	if (vap->va_data_size == 0)
		return (ENOEXEC);

	imgp->ip_arch_offset = (user_size_t)0;
	imgp->ip_arch_size = vap->va_data_size;

	/* Disable setuid-ness for traced programs or if MNT_NOSUID */
	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED))
		vap->va_mode &= ~(VSUID | VSGID);

	/*
	 * Disable _POSIX_SPAWN_ALLOW_DATA_EXEC and _POSIX_SPAWN_DISABLE_ASLR
	 * flags for setuid/setgid binaries.
	 */
	if (vap->va_mode & (VSUID | VSGID))
		imgp->ip_flags &= ~(IMGPF_ALLOW_DATA_EXEC | IMGPF_DISABLE_ASLR);

#if CONFIG_MACF
	/* Give MAC policies a chance to veto the exec before authorization */
	error = mac_vnode_check_exec(imgp->ip_vfs_context, vp, imgp);
	if (error)
		return (error);
#endif

	/* Check for execute permission */
	action = KAUTH_VNODE_EXECUTE;
	/* Traced images must also be readable */
	if (p->p_lflag & P_LTRACED)
		action |= KAUTH_VNODE_READ_DATA;
	if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0)
		return (error);

#if 0
	/* Don't let it run if anyone had it open for writing */
	vnode_lock(vp);
	if (vp->v_writecount) {
		panic("going to return ETXTBSY %x", vp);
		vnode_unlock(vp);
		return (ETXTBSY);
	}
	vnode_unlock(vp);
#endif


	/* XXX May want to indicate to underlying FS that vnode is open */

	return (error);
}
4504
4505
4506 /*
4507 * exec_handle_sugid
4508 *
4509 * Initially clear the P_SUGID in the process flags; if an SUGID process is
4510 * exec'ing a non-SUGID image, then this is the point of no return.
4511 *
4512 * If the image being activated is SUGID, then replace the credential with a
4513 * copy, disable tracing (unless the tracing process is root), reset the
 * mach task port to revoke it, and set the P_SUGID bit.
4515 *
4516 * If the saved user and group ID will be changing, then make sure it happens
4517 * to a new credential, rather than a shared one.
4518 *
4519 * Set the security token (this is probably obsolete, given that the token
4520 * should not technically be separate from the credential itself).
4521 *
4522 * Parameters: struct image_params * the image parameter block
4523 *
4524 * Returns: void No failure indication
4525 *
4526 * Implicit returns:
4527 * <process credential> Potentially modified/replaced
4528 * <task port> Potentially revoked
4529 * <process flags> P_SUGID bit potentially modified
4530 * <security token> Potentially modified
4531 */
static int
exec_handle_sugid(struct image_params *imgp)
{
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
	kauth_cred_t my_cred, my_new_cred;
	int i;
	int leave_sugid_clear = 0;
	int mac_reset_ipc = 0;
	int error = 0;
	task_t task = NULL;
#if CONFIG_MACF
	int mac_transition, disjoint_cred = 0;
	int label_update_return = 0;

	/*
	 * Determine whether a call to update the MAC label will result in the
	 * credential changing.
	 *
	 * Note:	MAC policies which do not actually end up modifying
	 *		the label subsequently are strongly encouraged to
	 *		return 0 for this check, since a non-zero answer will
	 *		slow down the exec fast path for normal binaries.
	 */
	mac_transition = mac_cred_check_label_update_execve(
							imgp->ip_vfs_context,
							imgp->ip_vp,
							imgp->ip_arch_offset,
							imgp->ip_scriptvp,
							imgp->ip_scriptlabelp,
							imgp->ip_execlabelp,
							p,
							imgp->ip_px_smpx);
#endif

	/* Initially clear P_SUGID; re-set below only if warranted */
	OSBitAndAtomic(~((uint32_t)P_SUGID), &p->p_flag);

	/*
	 * Order of the following is important; group checks must go last,
	 * as we use the success of the 'ismember' check combined with the
	 * failure of the explicit match to indicate that we will be setting
	 * the egid of the process even though the new process did not
	 * require VSUID/VSGID bits in order for it to set the new group as
	 * its egid.
	 *
	 * Note:	Technically, by this we are implying a call to
	 *		setegid() in the new process, rather than implying
	 *		it used its VSGID bit to set the effective group,
	 *		even though there is no code in that process to make
	 *		such a call.
	 */
	if (((imgp->ip_origvattr->va_mode & VSUID) != 0 &&
	     kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) ||
	    ((imgp->ip_origvattr->va_mode & VSGID) != 0 &&
		 ((kauth_cred_ismember_gid(cred, imgp->ip_origvattr->va_gid, &leave_sugid_clear) || !leave_sugid_clear) ||
		 (kauth_cred_getgid(cred) != imgp->ip_origvattr->va_gid)))) {

#if CONFIG_MACF
/* label for MAC transition and neither VSUID nor VSGID */
handle_mac_transition:
#endif

#if !SECURE_KERNEL
		/*
		 * Replace the credential with a copy of itself if euid or
		 * egid change.
		 *
		 * Note:	setuid binaries will automatically opt out of
		 *		group resolver participation as a side effect
		 *		of this operation.  This is an intentional
		 *		part of the security model, which requires a
		 *		participating credential be established by
		 *		escalating privilege, setting up all other
		 *		aspects of the credential including whether
		 *		or not to participate in external group
		 *		membership resolution, then dropping their
		 *		effective privilege to that of the desired
		 *		final credential state.
		 *
		 * Modifications to p_ucred must be guarded using the
		 * proc's ucred lock. This prevents others from accessing
		 * a garbage credential.
		 */

		/*
		 * Compare-and-swap loop: retry if p_ucred changed under us
		 * between taking the ref and taking the ucred lock.
		 */
		while (imgp->ip_origvattr->va_mode & VSUID) {
			my_cred = kauth_cred_proc_ref(p);
			my_new_cred = kauth_cred_setresuid(my_cred, KAUTH_UID_NONE, imgp->ip_origvattr->va_uid, imgp->ip_origvattr->va_uid, KAUTH_UID_NONE);

			if (my_new_cred == my_cred) {
				kauth_cred_unref(&my_cred);
				break;
			}

			/* update cred on proc */
			proc_ucred_lock(p);

			if (p->p_ucred != my_cred) {
				proc_ucred_unlock(p);
				kauth_cred_unref(&my_new_cred);
				continue;
			}

			/* donate cred reference on my_new_cred to p->p_ucred */
			p->p_ucred = my_new_cred;
			PROC_UPDATE_CREDS_ONPROC(p);
			proc_ucred_unlock(p);

			/* drop additional reference that was taken on the previous cred */
			kauth_cred_unref(&my_cred);

			break;
		}

		/* Same compare-and-swap pattern for the VSGID (setgid) case */
		while (imgp->ip_origvattr->va_mode & VSGID) {
			my_cred = kauth_cred_proc_ref(p);
			my_new_cred = kauth_cred_setresgid(my_cred, KAUTH_GID_NONE, imgp->ip_origvattr->va_gid, imgp->ip_origvattr->va_gid);

			if (my_new_cred == my_cred) {
				kauth_cred_unref(&my_cred);
				break;
			}

			/* update cred on proc */
			proc_ucred_lock(p);

			if (p->p_ucred != my_cred) {
				proc_ucred_unlock(p);
				kauth_cred_unref(&my_new_cred);
				continue;
			}

			/* donate cred reference on my_new_cred to p->p_ucred */
			p->p_ucred = my_new_cred;
			PROC_UPDATE_CREDS_ONPROC(p);
			proc_ucred_unlock(p);

			/* drop additional reference that was taken on the previous cred */
			kauth_cred_unref(&my_cred);

			break;
		}
#endif /* !SECURE_KERNEL */

#if CONFIG_MACF
		/*
		 * If a policy has indicated that it will transition the label,
		 * before making the call into the MAC policies, get a new
		 * duplicate credential, so they can modify it without
		 * modifying any others sharing it.
		 */
		if (mac_transition) {
			/*
			 * This hook may generate upcalls that require
			 * importance donation from the kernel.
			 * (23925818)
			 */
			thread_t thread = current_thread();
			thread_enable_send_importance(thread, TRUE);
			kauth_proc_label_update_execve(p,
						imgp->ip_vfs_context,
						imgp->ip_vp,
						imgp->ip_arch_offset,
						imgp->ip_scriptvp,
						imgp->ip_scriptlabelp,
						imgp->ip_execlabelp,
						&imgp->ip_csflags,
						imgp->ip_px_smpx,
						&disjoint_cred, /* will be non zero if disjoint */
						&label_update_return);
			thread_enable_send_importance(thread, FALSE);

			if (disjoint_cred) {
				/*
				 * If updating the MAC label resulted in a
				 * disjoint credential, flag that we need to
				 * set the P_SUGID bit.  This protects
				 * against debuggers being attached by an
				 * insufficiently privileged process onto the
				 * result of a transition to a more privileged
				 * credential.
				 */
				leave_sugid_clear = 0;
			}

			imgp->ip_mac_return = label_update_return;
		}

		mac_reset_ipc = mac_proc_check_inherit_ipc_ports(p, p->p_textvp, p->p_textoff, imgp->ip_vp, imgp->ip_arch_offset, imgp->ip_scriptvp);

#endif /* CONFIG_MACF */

		/*
		 * If 'leave_sugid_clear' is non-zero, then we passed the
		 * VSUID and MACF checks, and successfully determined that
		 * the previous cred was a member of the VSGID group, but
		 * that it was not the default at the time of the execve,
		 * and that the post-labelling credential was not disjoint.
		 * So we don't set the P_SUGID or reset mach ports and fds
		 * on the basis of simply running this code.
		 */
		if (mac_reset_ipc || !leave_sugid_clear) {
			/*
			 * Have mach reset the task and thread ports.
			 * We don't want anyone who had the ports before
			 * a setuid exec to be able to access/control the
			 * task/thread after.
			 */
			ipc_task_reset((imgp->ip_new_thread != NULL) ?
				       get_threadtask(imgp->ip_new_thread) : p->task);
			ipc_thread_reset((imgp->ip_new_thread != NULL) ?
					 imgp->ip_new_thread : current_thread());
		}

		if (!leave_sugid_clear) {
			/*
			 * Flag the process as setuid.
			 */
			OSBitOrAtomic(P_SUGID, &p->p_flag);

			/*
			 * Radar 2261856; setuid security hole fix
			 * XXX For setuid processes, attempt to ensure that
			 * stdin, stdout, and stderr are already allocated.
			 * We do not want userland to accidentally allocate
			 * descriptors in this range which has implied meaning
			 * to libc.
			 */
			for (i = 0; i < 3; i++) {

				if (p->p_fd->fd_ofiles[i] != NULL)
					continue;

				/*
				 * Do the kernel equivalent of
				 *
				 *	if i == 0
				 *		(void) open("/dev/null", O_RDONLY);
				 *	else
				 *		(void) open("/dev/null", O_WRONLY);
				 */

				struct fileproc *fp;
				int indx;
				int flag;
				struct nameidata *ndp = NULL;

				if (i == 0)
					flag = FREAD;
				else
					flag = FWRITE;

				if ((error = falloc(p,
				    &fp, &indx, imgp->ip_vfs_context)) != 0)
					continue;

				MALLOC(ndp, struct nameidata *, sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO);
				if (ndp == NULL) {
					fp_free(p, indx, fp);
					error = ENOMEM;
					break;
				}

				NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
				    CAST_USER_ADDR_T("/dev/null"),
				    imgp->ip_vfs_context);

				if ((error = vn_open(ndp, flag, 0)) != 0) {
					fp_free(p, indx, fp);
					FREE(ndp, M_TEMP);
					break;
				}

				struct fileglob *fg = fp->f_fglob;

				fg->fg_flag = flag;
				fg->fg_ops = &vnops;
				fg->fg_data = ndp->ni_vp;

				vnode_put(ndp->ni_vp);

				/* publish the new fd in the table and drop our use count */
				proc_fdlock(p);
				procfdtbl_releasefd(p, indx, NULL);
				fp_drop(p, indx, fp, 1);
				proc_fdunlock(p);

				FREE(ndp, M_TEMP);
			}
		}
	}
#if CONFIG_MACF
	else {
		/*
		 * We are here because we were told that the MAC label will
		 * be transitioned, and the binary is not VSUID or VSGID; to
		 * deal with this case, we could either duplicate a lot of
		 * code, or we can indicate we want to default the P_SUGID
		 * bit clear and jump back up.
		 */
		if (mac_transition) {
			leave_sugid_clear = 1;
			goto handle_mac_transition;
		}
	}

#endif /* CONFIG_MACF */

	/*
	 * Implement the semantic where the effective user and group become
	 * the saved user and group in exec'ed programs.
	 *
	 * Modifications to p_ucred must be guarded using the
	 * proc's ucred lock. This prevents others from accessing
	 * a garbage credential.
	 */
	for (;;) {
		my_cred = kauth_cred_proc_ref(p);
		my_new_cred = kauth_cred_setsvuidgid(my_cred, kauth_cred_getuid(my_cred),  kauth_cred_getgid(my_cred));

		if (my_new_cred == my_cred) {
			kauth_cred_unref(&my_cred);
			break;
		}

		/* update cred on proc */
		proc_ucred_lock(p);

		if (p->p_ucred != my_cred) {
			proc_ucred_unlock(p);
			kauth_cred_unref(&my_new_cred);
			continue;
		}

		/* donate cred reference on my_new_cred to p->p_ucred */
		p->p_ucred = my_new_cred;
		PROC_UPDATE_CREDS_ONPROC(p);
		proc_ucred_unlock(p);

		/* drop additional reference that was taken on the previous cred */
		kauth_cred_unref(&my_cred);

		break;
	}


	/* Update the process' identity version and set the security token */
	p->p_idversion++;

	if (imgp->ip_new_thread != NULL) {
		task = get_threadtask(imgp->ip_new_thread);
	} else {
		task = p->task;
	}
	set_security_token_task_internal(p, task);

	return(error);
}
4887
4888
4889 /*
4890 * create_unix_stack
4891 *
4892 * Description: Set the user stack address for the process to the provided
4893 * address. If a custom stack was not set as a result of the
4894 * load process (i.e. as specified by the image file for the
4895 * executable), then allocate the stack in the provided map and
4896 * set up appropriate guard pages for enforcing administrative
4897 * limits on stack growth, if they end up being needed.
4898 *
4899 * Parameters: p Process to set stack on
4900 * load_result Information from mach-o load commands
4901 * map Address map in which to allocate the new stack
4902 *
4903 * Returns: KERN_SUCCESS Stack successfully created
4904 * !KERN_SUCCESS Mach failure code
4905 */
static kern_return_t
create_unix_stack(vm_map_t map, load_result_t* load_result,
			proc_t p)
{
	mach_vm_size_t size, prot_size;
	mach_vm_offset_t addr, prot_addr;
	kern_return_t kr;

	mach_vm_address_t user_stack = load_result->user_stack;

	/* Record the stack top on the proc (under the proc lock) */
	proc_lock(p);
	p->user_stack = user_stack;
	proc_unlock(p);

	if (load_result->user_stack_alloc_size > 0) {
		/*
		 * Allocate enough space for the maximum stack size we
		 * will ever authorize and an extra page to act as
		 * a guard page for stack overflows. For default stacks,
		 * vm_initial_limit_stack takes care of the extra guard page.
		 * Otherwise we must allocate it ourselves.
		 */
		if (mach_vm_round_page_overflow(load_result->user_stack_alloc_size, &size)) {
			return KERN_INVALID_ARGUMENT;
		}
		/* Stack grows down: region ends at user_stack */
		addr = mach_vm_trunc_page(load_result->user_stack - size);
		kr = mach_vm_allocate_kernel(map, &addr, size,
				      VM_FLAGS_FIXED, VM_MEMORY_STACK);
		if (kr != KERN_SUCCESS) {
			// Can't allocate at default location, try anywhere
			addr = 0;
			kr = mach_vm_allocate_kernel(map, &addr, size,
					      VM_FLAGS_ANYWHERE, VM_MEMORY_STACK);
			if (kr != KERN_SUCCESS) {
				return kr;
			}

			/* Relocated: recompute the stack top and re-publish it */
			user_stack = addr + size;
			load_result->user_stack = user_stack;

			proc_lock(p);
			p->user_stack = user_stack;
			proc_unlock(p);
		}

		load_result->user_stack_alloc = addr;

		/*
		 * And prevent access to what's above the current stack
		 * size limit for this process.
		 */
		if (load_result->user_stack_size == 0) {
			load_result->user_stack_size = unix_stack_size(p);
			prot_size = mach_vm_trunc_page(size - load_result->user_stack_size);
		} else {
			prot_size = PAGE_SIZE;
		}

		/* Guard region sits at the low end of the allocation */
		prot_addr = addr;
		kr = mach_vm_protect(map,
				     prot_addr,
				     prot_size,
				     FALSE,
				     VM_PROT_NONE);
		if (kr != KERN_SUCCESS) {
			/* Undo the allocation so we don't leak address space */
			(void)mach_vm_deallocate(map, addr, size);
			return kr;
		}
	}

	return KERN_SUCCESS;
}
4978
4979 #include <sys/reboot.h>
4980
4981 /*
4982 * load_init_program_at_path
4983 *
4984 * Description: Load the "init" program; in most cases, this will be "launchd"
4985 *
4986 * Parameters: p Process to call execve() to create
4987 * the "init" program
4988 * scratch_addr Page in p, scratch space
4989 * path NULL terminated path
4990 *
4991 * Returns: KERN_SUCCESS Success
4992 * !KERN_SUCCESS See execve/mac_execve for error codes
4993 *
4994 * Notes: The process that is passed in is the first manufactured
4995 * process on the system, and gets here via bsd_ast() firing
4996 * for the first time. This is done to ensure that bsd_init()
4997 * has run to completion.
4998 *
4999 * The address map of the first manufactured process matches the
5000 * word width of the kernel. Once the self-exec completes, the
5001 * initproc might be different.
5002 */
5003 static int
5004 load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path)
5005 {
5006 int retval[2];
5007 int error;
5008 struct execve_args init_exec_args;
5009 user_addr_t argv0 = USER_ADDR_NULL, argv1 = USER_ADDR_NULL;
5010
5011 /*
5012 * Validate inputs and pre-conditions
5013 */
5014 assert(p);
5015 assert(scratch_addr);
5016 assert(path);
5017
5018 /*
5019 * Copy out program name.
5020 */
5021 size_t path_length = strlen(path) + 1;
5022 argv0 = scratch_addr;
5023 error = copyout(path, argv0, path_length);
5024 if (error)
5025 return error;
5026
5027 scratch_addr = USER_ADDR_ALIGN(scratch_addr + path_length, sizeof(user_addr_t));
5028
5029 /*
5030 * Put out first (and only) argument, similarly.
5031 * Assumes everything fits in a page as allocated above.
5032 */
5033 if (boothowto & RB_SINGLE) {
5034 const char *init_args = "-s";
5035 size_t init_args_length = strlen(init_args)+1;
5036
5037 argv1 = scratch_addr;
5038 error = copyout(init_args, argv1, init_args_length);
5039 if (error)
5040 return error;
5041
5042 scratch_addr = USER_ADDR_ALIGN(scratch_addr + init_args_length, sizeof(user_addr_t));
5043 }
5044
5045 if (proc_is64bit(p)) {
5046 user64_addr_t argv64bit[3];
5047
5048 argv64bit[0] = argv0;
5049 argv64bit[1] = argv1;
5050 argv64bit[2] = USER_ADDR_NULL;
5051
5052 error = copyout(argv64bit, scratch_addr, sizeof(argv64bit));
5053 if (error)
5054 return error;
5055 } else {
5056 user32_addr_t argv32bit[3];
5057
5058 argv32bit[0] = (user32_addr_t)argv0;
5059 argv32bit[1] = (user32_addr_t)argv1;
5060 argv32bit[2] = USER_ADDR_NULL;
5061
5062 error = copyout(argv32bit, scratch_addr, sizeof(argv32bit));
5063 if (error)
5064 return error;
5065 }
5066
5067 /*
5068 * Set up argument block for fake call to execve.
5069 */
5070 init_exec_args.fname = argv0;
5071 init_exec_args.argp = scratch_addr;
5072 init_exec_args.envp = USER_ADDR_NULL;
5073
5074 /*
5075 * So that init task is set with uid,gid 0 token
5076 */
5077 set_security_token(p);
5078
5079 return execve(p, &init_exec_args, retval);
5080 }
5081
/*
 * Candidate init (launchd) binary paths; debug/development variants are
 * compiled in only for those build flavors. See the search-order table
 * in the load_init_program() block comment below.
 */
static const char * init_programs[] = {
#if DEBUG
	"/usr/local/sbin/launchd.debug",
#endif
#if DEVELOPMENT || DEBUG
	"/usr/local/sbin/launchd.development",
#endif
	"/sbin/launchd",
};
5091
5092 /*
5093 * load_init_program
5094 *
5095 * Description: Load the "init" program; in most cases, this will be "launchd"
5096 *
5097 * Parameters: p Process to call execve() to create
5098 * the "init" program
5099 *
5100 * Returns: (void)
5101 *
5102 * Notes: The process that is passed in is the first manufactured
5103 * process on the system, and gets here via bsd_ast() firing
5104 * for the first time. This is done to ensure that bsd_init()
5105 * has run to completion.
5106 *
5107 * In DEBUG & DEVELOPMENT builds, the launchdsuffix boot-arg
5108 * may be used to select a specific launchd executable. As with
5109 * the kcsuffix boot-arg, setting launchdsuffix to "" or "release"
5110 * will force /sbin/launchd to be selected.
5111 *
5112 * Search order by build:
5113 *
5114 * DEBUG DEVELOPMENT RELEASE PATH
5115 * ----------------------------------------------------------------------------------
5116 * 1 1 NA /usr/local/sbin/launchd.$LAUNCHDSUFFIX
5117 * 2 NA NA /usr/local/sbin/launchd.debug
5118 * 3 2 NA /usr/local/sbin/launchd.development
5119 * 4 3 1 /sbin/launchd
5120 */
void
load_init_program(proc_t p)
{
	uint32_t i;
	int error;
	vm_map_t map = current_map();
	mach_vm_offset_t scratch_addr = 0;
	mach_vm_size_t map_page_size = vm_map_page_size(map);

	/*
	 * Carve one page out of pid 1's address space; load_init_program_at_path()
	 * stages the argv strings and pointer array for the fake execve() there.
	 * NOTE(review): the return value is deliberately ignored — if the
	 * allocation fails, the copyouts inside load_init_program_at_path()
	 * fail and we end up in the panic below.
	 */
	(void) mach_vm_allocate_kernel(map, &scratch_addr, map_page_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE);
#if CONFIG_MEMORYSTATUS
	/* Capture the memory state snapshot at boot, before init runs. */
	(void) memorystatus_init_at_boot_snapshot();
#endif /* CONFIG_MEMORYSTATUS */

#if DEBUG || DEVELOPMENT
	/* Check for boot-arg suffix first */
	char launchd_suffix[64];
	if (PE_parse_boot_argn("launchdsuffix", launchd_suffix, sizeof(launchd_suffix))) {
		char launchd_path[128];
		boolean_t is_release_suffix = ((launchd_suffix[0] == 0) ||
					       (strcmp(launchd_suffix, "release") == 0));

		if (is_release_suffix) {
			/* "" or "release" forces the stock binary; failure here is fatal. */
			printf("load_init_program: attempting to load /sbin/launchd\n");
			error = load_init_program_at_path(p, (user_addr_t)scratch_addr, "/sbin/launchd");
			if (!error)
				return;

			panic("Process 1 exec of launchd.release failed, errno %d", error);
		} else {
			/* Try /usr/local/sbin/launchd.<suffix>; on failure fall
			 * through to the built-in search order below. */
			strlcpy(launchd_path, "/usr/local/sbin/launchd.", sizeof(launchd_path));
			strlcat(launchd_path, launchd_suffix, sizeof(launchd_path));

			printf("load_init_program: attempting to load %s\n", launchd_path);
			error = load_init_program_at_path(p, (user_addr_t)scratch_addr, launchd_path);
			if (!error) {
				return;
			} else {
				printf("load_init_program: failed loading %s: errno %d\n", launchd_path, error);
			}
		}
	}
#endif

	/* Walk the built-in candidate list; the first successful exec wins. */
	error = ENOENT;
	for (i = 0; i < sizeof(init_programs)/sizeof(init_programs[0]); i++) {
		printf("load_init_program: attempting to load %s\n", init_programs[i]);
		error = load_init_program_at_path(p, (user_addr_t)scratch_addr, init_programs[i]);
		if (!error) {
			return;
		} else {
			printf("load_init_program: failed loading %s: errno %d\n", init_programs[i], error);
		}
	}

	/* No init program could be exec'd: the system cannot come up. */
	panic("Process 1 exec of %s failed, errno %d", ((i == 0) ? "<null>" : init_programs[i-1]), error);
}
5178
5179 /*
5180 * load_return_to_errno
5181 *
5182 * Description: Convert a load_return_t (Mach error) to an errno (BSD error)
5183 *
5184 * Parameters: lrtn Mach error number
5185 *
5186 * Returns: (int) BSD error number
5187 * 0 Success
5188 * EBADARCH Bad architecture
5189 * EBADMACHO Bad Mach object file
5190 * ESHLIBVERS Bad shared library version
5191 * ENOMEM Out of memory/resource shortage
5192 * EACCES Access denied
 * ENOENT Entry not found (usually "file
 * does not exist")
5195 * EIO An I/O error occurred
5196 * EBADEXEC The executable is corrupt/unknown
5197 */
5198 static int
5199 load_return_to_errno(load_return_t lrtn)
5200 {
5201 switch (lrtn) {
5202 case LOAD_SUCCESS:
5203 return 0;
5204 case LOAD_BADARCH:
5205 return EBADARCH;
5206 case LOAD_BADMACHO:
5207 case LOAD_BADMACHO_UPX:
5208 return EBADMACHO;
5209 case LOAD_SHLIB:
5210 return ESHLIBVERS;
5211 case LOAD_NOSPACE:
5212 case LOAD_RESOURCE:
5213 return ENOMEM;
5214 case LOAD_PROTECT:
5215 return EACCES;
5216 case LOAD_ENOENT:
5217 return ENOENT;
5218 case LOAD_IOERROR:
5219 return EIO;
5220 case LOAD_FAILURE:
5221 case LOAD_DECRYPTFAIL:
5222 default:
5223 return EBADEXEC;
5224 }
5225 }
5226
5227 #include <mach/mach_types.h>
5228 #include <mach/vm_prot.h>
5229 #include <mach/semaphore.h>
5230 #include <mach/sync_policy.h>
5231 #include <kern/clock.h>
5232 #include <mach/kern_return.h>
5233
5234 /*
5235 * execargs_alloc
5236 *
5237 * Description: Allocate the block of memory used by the execve arguments.
5238 * At the same time, we allocate a page so that we can read in
5239 * the first page of the image.
5240 *
5241 * Parameters: struct image_params * the image parameter block
5242 *
5243 * Returns: 0 Success
5244 * EINVAL Invalid argument
5245 * EACCES Permission denied
5246 * EINTR Interrupted function
5247 * ENOMEM Not enough space
5248 *
5249 * Notes: This is a temporary allocation into the kernel address space
5250 * to enable us to copy arguments in from user space. This is
5251 * necessitated by not mapping the process calling execve() into
5252 * the kernel address space during the execve() system call.
5253 *
5254 * We assemble the argument and environment, etc., into this
5255 * region before copying it as a single block into the child
5256 * process address space (at the top or bottom of the stack,
5257 * depending on which way the stack grows; see the function
5258 * exec_copyout_strings() for details).
5259 *
5260 * This ends up with a second (possibly unnecessary) copy compared
 * with assembling the data directly into the child address space,
5262 * instead, but since we cannot be guaranteed that the parent has
5263 * not modified its environment, we can't really know that it's
5264 * really a block there as well.
5265 */
5266
5267
/* Number of threads currently blocked waiting for an execargs slot. */
static int execargs_waiters = 0;
/* Protects execargs_cache, execargs_free_count and execargs_waiters. */
lck_mtx_t *execargs_cache_lock;
5270
5271 static void
5272 execargs_lock_lock(void) {
5273 lck_mtx_lock_spin(execargs_cache_lock);
5274 }
5275
5276 static void
5277 execargs_lock_unlock(void) {
5278 lck_mtx_unlock(execargs_cache_lock);
5279 }
5280
5281 static wait_result_t
5282 execargs_lock_sleep(void) {
5283 return(lck_mtx_sleep(execargs_cache_lock, LCK_SLEEP_DEFAULT, &execargs_free_count, THREAD_INTERRUPTIBLE));
5284 }
5285
5286 static kern_return_t
5287 execargs_purgeable_allocate(char **execarg_address) {
5288 kern_return_t kr = vm_allocate_kernel(bsd_pageable_map, (vm_offset_t *)execarg_address, BSD_PAGEABLE_SIZE_PER_EXEC, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE, VM_KERN_MEMORY_NONE);
5289 assert(kr == KERN_SUCCESS);
5290 return kr;
5291 }
5292
5293 static kern_return_t
5294 execargs_purgeable_reference(void *execarg_address) {
5295 int state = VM_PURGABLE_NONVOLATILE;
5296 kern_return_t kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);
5297
5298 assert(kr == KERN_SUCCESS);
5299 return kr;
5300 }
5301
5302 static kern_return_t
5303 execargs_purgeable_volatilize(void *execarg_address) {
5304 int state = VM_PURGABLE_VOLATILE | VM_PURGABLE_ORDERING_OBSOLETE;
5305 kern_return_t kr;
5306 kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);
5307
5308 assert(kr == KERN_SUCCESS);
5309
5310 return kr;
5311 }
5312
5313 static void
5314 execargs_wakeup_waiters(void) {
5315 thread_wakeup(&execargs_free_count);
5316 }
5317
/*
 * Allocate (or take from the cache) the scratch region used to assemble
 * the execve() argument/environment strings plus the header-read page.
 * Returns 0 on success, EINTR if interrupted while waiting for a free
 * slot, or ENOMEM if the purgeable allocation fails.
 */
static int
execargs_alloc(struct image_params *imgp)
{
	kern_return_t kret;
	wait_result_t res;
	int i, cache_index = -1;

	execargs_lock_lock();

	/* Throttle concurrent execs: block until a slot frees up. */
	while (execargs_free_count == 0) {
		execargs_waiters++;
		res = execargs_lock_sleep();	/* drops and re-takes the lock */
		execargs_waiters--;
		if (res != THREAD_AWAKENED) {
			/* Interrupted while waiting; bail out. */
			execargs_lock_unlock();
			return (EINTR);
		}
	}

	/* Claim the slot, then try to grab a cached region for it. */
	execargs_free_count--;

	for (i = 0; i < execargs_cache_size; i++) {
		vm_offset_t element = execargs_cache[i];
		if (element) {
			cache_index = i;
			imgp->ip_strings = (char *)(execargs_cache[i]);
			execargs_cache[i] = 0;
			break;
		}
	}

	assert(execargs_free_count >= 0);

	execargs_lock_unlock();

	/*
	 * Cache miss: allocate a fresh purgeable region.
	 * Cache hit: the cached region was volatilized by execargs_free(),
	 * so make it non-volatile again before use.
	 */
	if (cache_index == -1) {
		kret = execargs_purgeable_allocate(&imgp->ip_strings);
	}
	else
		kret = execargs_purgeable_reference(imgp->ip_strings);

	assert(kret == KERN_SUCCESS);
	if (kret != KERN_SUCCESS) {
		return (ENOMEM);
	}

	/* last page used to read in file headers */
	imgp->ip_vdata = imgp->ip_strings + ( NCARGS + PAGE_SIZE );
	imgp->ip_strendp = imgp->ip_strings;
	imgp->ip_argspace = NCARGS;
	imgp->ip_strspace = ( NCARGS + PAGE_SIZE );

	return (0);
}
5372
5373 /*
5374 * execargs_free
5375 *
5376 * Description: Free the block of memory used by the execve arguments and the
5377 * first page of the executable by a previous call to the function
5378 * execargs_alloc().
5379 *
5380 * Parameters: struct image_params * the image parameter block
5381 *
5382 * Returns: 0 Success
5383 * EINVAL Invalid argument
 * EINTR Operation interrupted
5385 */
static int
execargs_free(struct image_params *imgp)
{
	kern_return_t kret;
	int i;
	boolean_t needs_wakeup = FALSE;

	/* Let the VM system reclaim the region's pages if it needs to. */
	kret = execargs_purgeable_volatilize(imgp->ip_strings);

	execargs_lock_lock();
	execargs_free_count++;

	/* Park the region in the first empty cache slot for reuse. */
	for (i = 0; i < execargs_cache_size; i++) {
		vm_offset_t element = execargs_cache[i];
		if (element == 0) {
			execargs_cache[i] = (vm_offset_t) imgp->ip_strings;
			imgp->ip_strings = NULL;
			break;
		}
	}

	/* A slot must always be available — the cache is sized to match
	 * the number of outstanding regions (see execargs_free_count). */
	assert(imgp->ip_strings == NULL);

	if (execargs_waiters > 0)
		needs_wakeup = TRUE;

	execargs_lock_unlock();

	/* Wake waiters outside the lock to avoid a pointless contention. */
	if (needs_wakeup == TRUE)
		execargs_wakeup_waiters();

	return ((kret == KERN_SUCCESS ? 0 : EINVAL));
}
5419
/*
 * Point p->p_textvp/p_textoff at the newly exec'ed image's vnode and
 * architecture offset, taking a persistent reference on the new vnode
 * and dropping the reference held on the previous text vnode (if any).
 */
static void
exec_resettextvp(proc_t p, struct image_params *imgp)
{
	vnode_t vp;
	off_t offset;
	vnode_t tvp = p->p_textvp;	/* old text vnode, released at the end */
	int ret;

	vp = imgp->ip_vp;
	offset = imgp->ip_arch_offset;

	if (vp == NULLVP)
		panic("exec_resettextvp: expected valid vp");

	/* Take the new reference BEFORE publishing the pointer under the
	 * proc lock; on failure, publish NULLVP rather than an unref'ed vp. */
	ret = vnode_ref(vp);
	proc_lock(p);
	if (ret == 0) {
		p->p_textvp = vp;
		p->p_textoff = offset;
	} else {
		p->p_textvp = NULLVP;	/* this is paranoia */
		p->p_textoff = 0;
	}
	proc_unlock(p);

	/* Drop the persistent reference on the old text vnode; we need a
	 * transient iocount (vnode_getwithref) to safely call vnode_rele. */
	if ( tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			vnode_rele(tvp);
			vnode_put(tvp);
		}
	}

}
5453
// Length of the hex rendering of a cdhash, including the trailing NUL
// (hence "SIZE" rather than "LEN").
static const size_t CS_CDHASH_STRING_SIZE = CS_CDHASH_LEN * 2 + 1;

/*
 * Render a raw cdhash as a NUL-terminated lowercase hex string in str.
 * Done by hand because xnu's printf %*D/%nD handling is unreliable, see:
 * <rdar://problem/33328859> confusion around %*D/%nD in printf
 */
static void cdhash_to_string(char str[CS_CDHASH_STRING_SIZE], uint8_t const * const cdhash) {
	static char const hexdigits[] = "0123456789abcdef";
	char *out = str;

	for (int i = 0; i < CS_CDHASH_LEN; ++i) {
		uint8_t byte = cdhash[i];
		*out++ = hexdigits[byte >> 4];
		*out++ = hexdigits[byte & 0x0f];
	}
	*out = 0;	/* writes str[CS_CDHASH_STRING_SIZE - 1] */
}
5471
5472 /*
5473 * If the process is not signed or if it contains entitlements, we
5474 * need to communicate through the task_access_port to taskgated.
5475 *
5476 * taskgated will provide a detached code signature if present, and
5477 * will enforce any restrictions on entitlements.
5478 */
5479
/*
 * Decide whether this exec must consult taskgated (see block comment
 * above).  Returns TRUE if the callout is needed; *require_success is set
 * TRUE when a taskgated failure must abort the exec.
 */
static boolean_t
taskgated_required(proc_t p, boolean_t *require_success)
{
	size_t length;
	void *blob;
	int error;

	if (cs_debug > 2)
		csvnode_print_debug(p->p_textvp);

	/* Platform binaries can skip the callout (on desktop, only when not
	 * constrained to a platform path). */
#if !CONFIG_EMBEDDED
	const int can_skip_taskgated = csproc_get_platform_binary(p) && !csproc_get_platform_path(p);
#else
	const int can_skip_taskgated = csproc_get_platform_binary(p);
#endif
	if (can_skip_taskgated) {
		if (cs_debug) printf("taskgated not required for: %s\n", p->p_name);
		*require_success = FALSE;
		return FALSE;
	}

	/* Unsigned: ask taskgated (it may provide a detached signature),
	 * but don't require it to succeed. */
	if ((p->p_csflags & CS_VALID) == 0) {
		*require_success = FALSE;
		return TRUE;
	}

	/* Signed with entitlements: taskgated must vet them. */
	error = cs_entitlements_blob_get(p, &blob, &length);
	if (error == 0 && blob != NULL) {
#if !CONFIG_EMBEDDED
		/*
		 * fatal on the desktop when entitlements are present,
		 * unless we started in single-user mode
		 */
		if ((boothowto & RB_SINGLE) == 0)
			*require_success = TRUE;
		/*
		 * Allow initproc to run without causing taskgated to launch
		 */
		if (p == initproc) {
			*require_success = FALSE;
			return FALSE;
		}

#endif
		if (cs_debug) printf("taskgated required for: %s\n", p->p_name);

		return TRUE;
	}

	/* Signed, no entitlements: no callout needed. */
	*require_success = FALSE;
	return FALSE;
}
5532
5533 /*
5534 * __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__
5535 *
5536 * Description: Waits for the userspace daemon to respond to the request
5537 * we made. Function declared non inline to be visible in
5538 * stackshots and spindumps as well as debugging.
5539 */
__attribute__((noinline)) int
__EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid)
{
	/* Blocking upcall to taskgated; kept noinline on purpose so the wait
	 * appears under this name in stackshots and spindumps. */
	return find_code_signature(task_access_port, new_pid);
}
5545
/*
 * check_for_signature
 *
 * Finalize the code-signing state of a freshly exec'ed process: install
 * the image's cs flags, consult taskgated when taskgated_required() says
 * so, and on failure kill the (already constructed) process with an exit
 * reason.  Returns 0 when execution may proceed, else an errno — except
 * on the spawn/vfork paths, where failure is delivered via SIGKILL to the
 * new process and 0 is returned to the caller.
 *
 * Ownership: signature_failure_reason is either consumed by one of the
 * psignal*_with_reason() calls or must end up OS_REASON_NULL (asserted at
 * the bottom) — otherwise an exit reason would leak.
 */
static int
check_for_signature(proc_t p, struct image_params *imgp)
{
	mach_port_t port = NULL;
	kern_return_t kr = KERN_FAILURE;
	int error = EACCES;
	boolean_t unexpected_failure = FALSE;
	unsigned char hash[CS_CDHASH_LEN];
	boolean_t require_success = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
	os_reason_t signature_failure_reason = OS_REASON_NULL;

	/*
	 * Override inherited code signing flags with the
	 * ones for the process that is being successfully
	 * loaded
	 */
	proc_lock(p);
	p->p_csflags = imgp->ip_csflags;
	proc_unlock(p);

	/* Set the switch_protect flag on the map */
	if(p->p_csflags & (CS_HARD|CS_KILL)) {
		vm_map_switch_protect(get_task_map(p->task), TRUE);
	}

	/*
	 * image activation may be failed due to policy
	 * which is unexpected but security framework does not
	 * approve of exec, kill and return immediately.
	 */
	if (imgp->ip_mac_return != 0) {

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
					p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY, 0, 0);
		signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY);
		error = imgp->ip_mac_return;
		unexpected_failure = TRUE;
		goto done;
	}

	/* A code-signing error recorded during image activation wins; take
	 * ownership of the exit reason stashed in the image params. */
	if (imgp->ip_cs_error != OS_REASON_NULL) {
		signature_failure_reason = imgp->ip_cs_error;
		imgp->ip_cs_error = OS_REASON_NULL;
		error = EACCES;
		goto done;
	}

	/* check if callout to taskgated is needed */
	if (!taskgated_required(p, &require_success)) {
		error = 0;
		goto done;
	}

	/* No usable task access port: fatal only if taskgated's approval
	 * was required (entitled binary on the desktop). */
	kr = task_get_task_access_port(p->task, &port);
	if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) {
		error = 0;
		if (require_success) {
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT, 0, 0);
			signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT);
			error = EACCES;
		}
		goto done;
	}

	/*
	 * taskgated returns KERN_SUCCESS if it has completed its work
	 * and the exec should continue, KERN_FAILURE if the exec should
	 * fail, or it may error out with different error code in an
	 * event of mig failure (e.g. process was signalled during the
	 * rpc call, taskgated died, mig server died etc.).
	 */

	kr = __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(port, p->p_pid);
	switch (kr) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_FAILURE:
		error = EACCES;

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
					p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG, 0, 0);
		signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG);
		goto done;
	default:
		error = EACCES;

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
					p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER, 0, 0);
		signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER);
		unexpected_failure = TRUE;
		goto done;
	}

	/* Only do this if exec_resettextvp() did not fail */
	if (p->p_textvp != NULLVP) {
		/*
		 * If there's a new code directory, mark this process
		 * as signed.
		 */
		if (0 == ubc_cs_getcdhash(p->p_textvp, p->p_textoff, hash)) {
			proc_lock(p);
			p->p_csflags |= CS_VALID;
			proc_unlock(p);
		}
	}

done:
	if (0 == error) {
		/* The process's code signature related properties are
		 * fully set up, so this is an opportune moment to log
		 * platform binary execution, if desired. */
		if (platform_exec_logging != 0 && csproc_get_platform_binary(p)) {
			uint8_t cdhash[CS_CDHASH_LEN];
			char cdhash_string[CS_CDHASH_STRING_SIZE];
			proc_getcdhash(p, cdhash);
			cdhash_to_string(cdhash_string, cdhash);

			os_log(peLog, "CS Platform Exec Logging: Executing platform signed binary "
				   "'%s' with cdhash %s\n", p->p_name, cdhash_string);
		}
	} else {
		/* NOTE(review): p_csflags is updated here without proc_lock,
		 * unlike the earlier updates above — presumably relying on the
		 * exec path serializing access at this point; confirm. */
		if (!unexpected_failure)
			p->p_csflags |= CS_KILLED;
		/* make very sure execution fails */
		if (vfexec || spawn) {
			/* psignal_vfork_with_reason consumes the reason ref. */
			assert(signature_failure_reason != OS_REASON_NULL);
			psignal_vfork_with_reason(p, p->task, imgp->ip_new_thread,
					SIGKILL, signature_failure_reason);
			signature_failure_reason = OS_REASON_NULL;
			error = 0;
		} else {
			assert(signature_failure_reason != OS_REASON_NULL);
			psignal_with_reason(p, SIGKILL, signature_failure_reason);
			signature_failure_reason = OS_REASON_NULL;
		}
	}

	/* If we hit this, we likely would have leaked an exit reason */
	assert(signature_failure_reason == OS_REASON_NULL);
	return error;
}
5691
5692 /*
5693 * Typically as soon as we start executing this process, the
5694 * first instruction will trigger a VM fault to bring the text
5695 * pages (as executable) into the address space, followed soon
5696 * thereafter by dyld data structures (for dynamic executable).
5697 * To optimize this, as well as improve support for hardware
5698 * debuggers that can only access resident pages present
5699 * in the process' page tables, we prefault some pages if
5700 * possible. Errors are non-fatal.
5701 */
static void exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result)
{
	int ret;
	size_t expected_all_image_infos_size;

	/*
	 * Prefault executable or dyld entry point.
	 */
	vm_fault(current_map(),
		 vm_map_trunc_page(load_result->entry_point,
				   vm_map_page_mask(current_map())),
		 VM_PROT_READ | VM_PROT_EXECUTE,
		 FALSE, VM_KERN_MEMORY_NONE,
		 THREAD_UNINT, NULL, 0);

	/* The anchor structure layout depends on the image's pointer size. */
	if (imgp->ip_flags & IMGPF_IS_64BIT) {
		expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos);
	} else {
		expected_all_image_infos_size = sizeof(struct user32_dyld_all_image_infos);
	}

	/* Decode dyld anchor structure from <mach-o/dyld_images.h> */
	if (load_result->dynlinker &&
		load_result->all_image_info_addr &&
		load_result->all_image_info_size >= expected_all_image_infos_size) {
		union {
			struct user64_dyld_all_image_infos	infos64;
			struct user32_dyld_all_image_infos	infos32;
		} all_image_infos;

		/*
		 * Pre-fault to avoid copyin() going through the trap handler
		 * and recovery path.
		 */
		vm_fault(current_map(),
			 vm_map_trunc_page(load_result->all_image_info_addr,
					   vm_map_page_mask(current_map())),
			 VM_PROT_READ | VM_PROT_WRITE,
			 FALSE, VM_KERN_MEMORY_NONE,
			 THREAD_UNINT, NULL, 0);
		if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) {
			/* all_image_infos straddles a page */
			vm_fault(current_map(),
				 vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1,
						   vm_map_page_mask(current_map())),
				 VM_PROT_READ | VM_PROT_WRITE,
				 FALSE, VM_KERN_MEMORY_NONE,
				 THREAD_UNINT, NULL, 0);
		}

		ret = copyin(load_result->all_image_info_addr,
			     &all_image_infos,
			     expected_all_image_infos_size);
		/* The version field is at the same offset in both layouts, so
		 * infos32.version is safe to read regardless of pointer size. */
		if (ret == 0 && all_image_infos.infos32.version >= DYLD_ALL_IMAGE_INFOS_ADDRESS_MINIMUM_VERSION) {

			user_addr_t notification_address;
			user_addr_t dyld_image_address;
			user_addr_t dyld_version_address;
			user_addr_t dyld_all_image_infos_address;
			user_addr_t dyld_slide_amount;

			if (imgp->ip_flags & IMGPF_IS_64BIT) {
				notification_address = all_image_infos.infos64.notification;
				dyld_image_address = all_image_infos.infos64.dyldImageLoadAddress;
				dyld_version_address = all_image_infos.infos64.dyldVersion;
				dyld_all_image_infos_address = all_image_infos.infos64.dyldAllImageInfosAddress;
			} else {
				notification_address = all_image_infos.infos32.notification;
				dyld_image_address = all_image_infos.infos32.dyldImageLoadAddress;
				dyld_version_address = all_image_infos.infos32.dyldVersion;
				dyld_all_image_infos_address = all_image_infos.infos32.dyldAllImageInfosAddress;
			}

			/*
			 * dyld statically sets up the all_image_infos in its Mach-O
			 * binary at static link time, with pointers relative to its default
			 * load address. Since ASLR might slide dyld before its first
			 * instruction is executed, "dyld_slide_amount" tells us how far
			 * dyld was loaded compared to its default expected load address.
			 * All other pointers into dyld's image should be adjusted by this
			 * amount. At some point later, dyld will fix up pointers to take
			 * into account the slide, at which point the all_image_infos_address
			 * field in the structure will match the runtime load address, and
			 * "dyld_slide_amount" will be 0, if we were to consult it again.
			 */

			dyld_slide_amount = load_result->all_image_info_addr - dyld_all_image_infos_address;

#if 0
			kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				(uint64_t)load_result->all_image_info_addr,
				all_image_infos.infos32.version,
				(uint64_t)notification_address,
				(uint64_t)dyld_image_address,
				(uint64_t)dyld_version_address,
				(uint64_t)dyld_all_image_infos_address);
#endif

			/* Prefault the slid dyld pointers: notification routine and
			 * image base as executable text, version string read-only,
			 * and the all_image_infos itself as writable data. */
			vm_fault(current_map(),
				 vm_map_trunc_page(notification_address + dyld_slide_amount,
						   vm_map_page_mask(current_map())),
				 VM_PROT_READ | VM_PROT_EXECUTE,
				 FALSE, VM_KERN_MEMORY_NONE,
				 THREAD_UNINT, NULL, 0);
			vm_fault(current_map(),
				 vm_map_trunc_page(dyld_image_address + dyld_slide_amount,
						   vm_map_page_mask(current_map())),
				 VM_PROT_READ | VM_PROT_EXECUTE,
				 FALSE, VM_KERN_MEMORY_NONE,
				 THREAD_UNINT, NULL, 0);
			vm_fault(current_map(),
				 vm_map_trunc_page(dyld_version_address + dyld_slide_amount,
						   vm_map_page_mask(current_map())),
				 VM_PROT_READ,
				 FALSE, VM_KERN_MEMORY_NONE,
				 THREAD_UNINT, NULL, 0);
			vm_fault(current_map(),
				 vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount,
						   vm_map_page_mask(current_map())),
				 VM_PROT_READ | VM_PROT_WRITE,
				 FALSE, VM_KERN_MEMORY_NONE,
				 THREAD_UNINT, NULL, 0);
		}
	}
}