]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/mach_loader.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / bsd / kern / mach_loader.c
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (C) 1988, 1989, NeXT, Inc.
30 *
31 * File: kern/mach_loader.c
32 * Author: Avadis Tevanian, Jr.
33 *
34 * Mach object file loader (kernel version, for now).
35 *
36 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
37 * Started.
38 */
39
40 #include <sys/param.h>
41 #include <sys/vnode_internal.h>
42 #include <sys/uio.h>
43 #include <sys/namei.h>
44 #include <sys/proc_internal.h>
45 #include <sys/kauth.h>
46 #include <sys/stat.h>
47 #include <sys/malloc.h>
48 #include <sys/mount_internal.h>
49 #include <sys/fcntl.h>
50 #include <sys/file_internal.h>
51 #include <sys/ubc_internal.h>
52 #include <sys/imgact.h>
53 #include <sys/codesign.h>
54 #include <sys/proc_uuid_policy.h>
55 #include <sys/reason.h>
56 #include <sys/kdebug.h>
57 #include <sys/spawn_internal.h>
58
59 #include <mach/mach_types.h>
60 #include <mach/vm_map.h> /* vm_allocate() */
61 #include <mach/mach_vm.h> /* mach_vm_allocate() */
62 #include <mach/vm_statistics.h>
63 #include <mach/task.h>
64 #include <mach/thread_act.h>
65
66 #include <machine/vmparam.h>
67 #include <machine/exec.h>
68 #include <machine/pal_routines.h>
69
70 #include <kern/ast.h>
71 #include <kern/kern_types.h>
72 #include <kern/cpu_number.h>
73 #include <kern/mach_loader.h>
74 #include <kern/mach_fat.h>
75 #include <kern/kalloc.h>
76 #include <kern/task.h>
77 #include <kern/thread.h>
78 #include <kern/page_decrypt.h>
79
80 #include <mach-o/fat.h>
81 #include <mach-o/loader.h>
82
83 #include <vm/pmap.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_kern.h>
86 #include <vm/vm_pager.h>
87 #include <vm/vnode_pager.h>
88 #include <vm/vm_protos.h>
89 #include <vm/vm_shared_region.h>
90 #include <IOKit/IOReturn.h> /* for kIOReturnNotPrivileged */
91 #include <IOKit/IOBSD.h> /* for IOVnodeHasEntitlement */
92
93 #include <os/overflow.h>
94
95 /*
96 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
97 * when KERNEL is defined.
98 */
99 extern pmap_t pmap_create_options(ledger_t ledger, vm_map_size_t size,
100 unsigned int flags);
101 #if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX
102 extern void pmap_disable_user_jop(pmap_t pmap);
103 #endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */
104
105 /* XXX should have prototypes in a shared header file */
106 extern int get_map_nentries(vm_map_t);
107
108 extern kern_return_t memory_object_signed(memory_object_control_t control,
109 boolean_t is_signed);
110
111 /* An empty load_result_t */
112 static const load_result_t load_result_null = {
113 .mach_header = MACH_VM_MIN_ADDRESS,
114 .entry_point = MACH_VM_MIN_ADDRESS,
115 .user_stack = MACH_VM_MIN_ADDRESS,
116 .user_stack_size = 0,
117 .user_stack_alloc = MACH_VM_MIN_ADDRESS,
118 .user_stack_alloc_size = 0,
119 .all_image_info_addr = MACH_VM_MIN_ADDRESS,
120 .all_image_info_size = 0,
121 .thread_count = 0,
122 .unixproc = 0,
123 .dynlinker = 0,
124 .needs_dynlinker = 0,
125 .validentry = 0,
126 .using_lcmain = 0,
127 .is_64bit_addr = 0,
128 .is_64bit_data = 0,
129 .custom_stack = 0,
130 .csflags = 0,
131 .has_pagezero = 0,
132 .uuid = { 0 },
133 .min_vm_addr = MACH_VM_MAX_ADDRESS,
134 .max_vm_addr = MACH_VM_MIN_ADDRESS,
135 .cs_end_offset = 0,
136 .threadstate = NULL,
137 .threadstate_sz = 0,
138 .is_cambria = 0,
139 .dynlinker_mach_header = MACH_VM_MIN_ADDRESS,
140 .dynlinker_fd = -1,
141 };
142
143 /*
144 * Prototypes of static functions.
145 */
146 static load_return_t
147 parse_machfile(
148 struct vnode *vp,
149 vm_map_t map,
150 thread_t thread,
151 struct mach_header *header,
152 off_t file_offset,
153 off_t macho_size,
154 int depth,
155 int64_t slide,
156 int64_t dyld_slide,
157 load_result_t *result,
158 load_result_t *binresult,
159 struct image_params *imgp
160 );
161
162 static load_return_t
163 load_segment(
164 struct load_command *lcp,
165 uint32_t filetype,
166 void *control,
167 off_t pager_offset,
168 off_t macho_size,
169 struct vnode *vp,
170 vm_map_t map,
171 int64_t slide,
172 load_result_t *result,
173 struct image_params *imgp
174 );
175
176 static load_return_t
177 load_uuid(
178 struct uuid_command *uulp,
179 char *command_end,
180 load_result_t *result
181 );
182
183 static load_return_t
184 load_version(
185 struct version_min_command *vmc,
186 boolean_t *found_version_cmd,
187 int ip_flags,
188 load_result_t *result
189 );
190
191 static load_return_t
192 load_code_signature(
193 struct linkedit_data_command *lcp,
194 struct vnode *vp,
195 off_t macho_offset,
196 off_t macho_size,
197 cpu_type_t cputype,
198 cpu_subtype_t cpusubtype,
199 load_result_t *result,
200 struct image_params *imgp);
201
202 #if CONFIG_CODE_DECRYPTION
203 static load_return_t
204 set_code_unprotect(
205 struct encryption_info_command *lcp,
206 caddr_t addr,
207 vm_map_t map,
208 int64_t slide,
209 struct vnode *vp,
210 off_t macho_offset,
211 cpu_type_t cputype,
212 cpu_subtype_t cpusubtype);
213 #endif
214
215 static
216 load_return_t
217 load_main(
218 struct entry_point_command *epc,
219 thread_t thread,
220 int64_t slide,
221 load_result_t *result
222 );
223
224 static
225 load_return_t
226 setup_driver_main(
227 thread_t thread,
228 int64_t slide,
229 load_result_t *result
230 );
231
232 static load_return_t
233 load_unixthread(
234 struct thread_command *tcp,
235 thread_t thread,
236 int64_t slide,
237 boolean_t is_x86_64_compat_binary,
238 load_result_t *result
239 );
240
241 static load_return_t
242 load_threadstate(
243 thread_t thread,
244 uint32_t *ts,
245 uint32_t total_size,
246 load_result_t *
247 );
248
249 static load_return_t
250 load_threadstack(
251 thread_t thread,
252 uint32_t *ts,
253 uint32_t total_size,
254 mach_vm_offset_t *user_stack,
255 int *customstack,
256 boolean_t is_x86_64_compat_binary,
257 load_result_t *result
258 );
259
260 static load_return_t
261 load_threadentry(
262 thread_t thread,
263 uint32_t *ts,
264 uint32_t total_size,
265 mach_vm_offset_t *entry_point
266 );
267
268 static load_return_t
269 load_dylinker(
270 struct dylinker_command *lcp,
271 integer_t archbits,
272 vm_map_t map,
273 thread_t thread,
274 int depth,
275 int64_t slide,
276 load_result_t *result,
277 struct image_params *imgp
278 );
279
280
281 #if __x86_64__
282 extern int bootarg_no32exec;
283 static boolean_t
284 check_if_simulator_binary(
285 struct image_params *imgp,
286 off_t file_offset,
287 off_t macho_size);
288 #endif
289
290 struct macho_data;
291
292 static load_return_t
293 get_macho_vnode(
294 const char *path,
295 integer_t archbits,
296 struct mach_header *mach_header,
297 off_t *file_offset,
298 off_t *macho_size,
299 struct macho_data *macho_data,
300 struct vnode **vpp,
301 struct image_params *imgp
302 );
303
304 static inline void
305 widen_segment_command(const struct segment_command *scp32,
306 struct segment_command_64 *scp)
307 {
308 scp->cmd = scp32->cmd;
309 scp->cmdsize = scp32->cmdsize;
310 bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
311 scp->vmaddr = scp32->vmaddr;
312 scp->vmsize = scp32->vmsize;
313 scp->fileoff = scp32->fileoff;
314 scp->filesize = scp32->filesize;
315 scp->maxprot = scp32->maxprot;
316 scp->initprot = scp32->initprot;
317 scp->nsects = scp32->nsects;
318 scp->flags = scp32->flags;
319 }
320
321 static void
322 note_all_image_info_section(const struct segment_command_64 *scp,
323 boolean_t is64, size_t section_size, const void *sections,
324 int64_t slide, load_result_t *result)
325 {
326 const union {
327 struct section s32;
328 struct section_64 s64;
329 } *sectionp;
330 unsigned int i;
331
332
333 if (strncmp(scp->segname, "__DATA_DIRTY", sizeof(scp->segname)) != 0 &&
334 strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) {
335 return;
336 }
337 for (i = 0; i < scp->nsects; ++i) {
338 sectionp = (const void *)
339 ((const char *)sections + section_size * i);
340 if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
341 sizeof(sectionp->s64.sectname))) {
342 result->all_image_info_addr =
343 is64 ? sectionp->s64.addr : sectionp->s32.addr;
344 result->all_image_info_addr += slide;
345 result->all_image_info_size =
346 is64 ? sectionp->s64.size : sectionp->s32.size;
347 return;
348 }
349 }
350 }
351
352 #if __arm64__
353 /*
354 * Allow bypassing some security rules (hard pagezero, no write+execute)
355 * in exchange for better binary compatibility for legacy apps built
356 * before 16KB-alignment was enforced.
357 */
358 const int fourk_binary_compatibility_unsafe = TRUE;
359 const int fourk_binary_compatibility_allow_wx = FALSE;
360 #endif /* __arm64__ */
361
362 #if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX
363 /**
364 * Determines whether this is an arm64e process which may host in-process
365 * plugins.
366 */
367 static inline bool
368 arm64e_plugin_host(struct image_params *imgp, load_result_t *result)
369 {
370 if (imgp->ip_flags & IMGPF_NOJOP) {
371 return false;
372 }
373
374 if (!result->platform_binary) {
375 return false;
376 }
377
378 struct cs_blob *csblob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset);
379 const char *identity = csblob_get_identity(csblob);
380 if (!identity) {
381 return false;
382 }
383
384 /* Check if override host plugin entitlement is present and posix spawn attribute to disable A keys is passed */
385 if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, OVERRIDE_PLUGIN_HOST_ENTITLEMENT)) {
386 return imgp->ip_flags & IMGPF_PLUGIN_HOST_DISABLE_A_KEYS;
387 }
388
389 /* Disabling library validation is a good signal that this process plans to host plugins */
390 const char *const disable_lv_entitlements[] = {
391 "com.apple.security.cs.disable-library-validation",
392 "com.apple.private.cs.automator-plugins",
393 CLEAR_LV_ENTITLEMENT,
394 };
395 for (size_t i = 0; i < ARRAY_COUNT(disable_lv_entitlements); i++) {
396 if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, disable_lv_entitlements[i])) {
397 return true;
398 }
399 }
400
401 /* From /System/Library/Security/HardeningExceptions.plist */
402 const char *const hardening_exceptions[] = {
403 "com.apple.perl5", /* Scripting engines may load third party code and jit*/
404 "com.apple.perl", /* Scripting engines may load third party code and jit*/
405 "org.python.python", /* Scripting engines may load third party code and jit*/
406 "com.apple.expect", /* Scripting engines may load third party code and jit*/
407 "com.tcltk.wish", /* Scripting engines may load third party code and jit*/
408 "com.tcltk.tclsh", /* Scripting engines may load third party code and jit*/
409 "com.apple.ruby", /* Scripting engines may load third party code and jit*/
410 "com.apple.bash", /* Required for the 'enable' command */
411 "com.apple.zsh", /* Required for the 'zmodload' command */
412 "com.apple.ksh", /* Required for 'builtin' command */
413 };
414 for (size_t i = 0; i < ARRAY_COUNT(hardening_exceptions); i++) {
415 if (strncmp(hardening_exceptions[i], identity, strlen(hardening_exceptions[i])) == 0) {
416 return true;
417 }
418 }
419
420 return false;
421 }
422 #endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */
423
424 load_return_t
425 load_machfile(
426 struct image_params *imgp,
427 struct mach_header *header,
428 thread_t thread,
429 vm_map_t *mapp,
430 load_result_t *result
431 )
432 {
433 struct vnode *vp = imgp->ip_vp;
434 off_t file_offset = imgp->ip_arch_offset;
435 off_t macho_size = imgp->ip_arch_size;
436 off_t total_size = 0;
437 off_t file_size = imgp->ip_vattr->va_data_size;
438 pmap_t pmap = 0; /* protected by create_map */
439 vm_map_t map;
440 load_result_t myresult;
441 load_return_t lret;
442 boolean_t enforce_hard_pagezero = TRUE;
443 int in_exec = (imgp->ip_flags & IMGPF_EXEC);
444 task_t task = current_task();
445 int64_t aslr_page_offset = 0;
446 int64_t dyld_aslr_page_offset = 0;
447 int64_t aslr_section_size = 0;
448 int64_t aslr_section_offset = 0;
449 kern_return_t kret;
450 unsigned int pmap_flags = 0;
451
452 if (os_add_overflow(file_offset, macho_size, &total_size) ||
453 total_size > file_size) {
454 return LOAD_BADMACHO;
455 }
456
457 result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
458 result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
459 #if defined(HAS_APPLE_PAC)
460 pmap_flags |= (imgp->ip_flags & IMGPF_NOJOP) ? PMAP_CREATE_DISABLE_JOP : 0;
461 #endif /* defined(HAS_APPLE_PAC) */
462 pmap_flags |= result->is_64bit_addr ? PMAP_CREATE_64BIT : 0;
463
464 task_t ledger_task;
465 if (imgp->ip_new_thread) {
466 ledger_task = get_threadtask(imgp->ip_new_thread);
467 } else {
468 ledger_task = task;
469 }
470
471 #if defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT)
472 if (imgp->ip_px_sa != NULL) {
473 struct _posix_spawnattr* psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
474 if (psa->psa_flags & _POSIX_SPAWN_FORCE_4K_PAGES) {
475 pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES;
476 }
477 }
478 #endif /* defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT) */
479
480 pmap = pmap_create_options(get_task_ledger(ledger_task),
481 (vm_map_size_t) 0,
482 pmap_flags);
483 if (pmap == NULL) {
484 return LOAD_RESOURCE;
485 }
486 map = vm_map_create(pmap,
487 0,
488 vm_compute_max_offset(result->is_64bit_addr),
489 TRUE);
490
491 #if defined(__arm64__)
492 if (result->is_64bit_addr) {
493 /* enforce 16KB alignment of VM map entries */
494 vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
495 } else {
496 vm_map_set_page_shift(map, page_shift_user32);
497 }
498 #elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
499 /* enforce 16KB alignment for watch targets with new ABI */
500 vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
501 #endif /* __arm64__ */
502
503 #if PMAP_CREATE_FORCE_4K_PAGES
504 if (pmap_flags & PMAP_CREATE_FORCE_4K_PAGES) {
505 DEBUG4K_LIFE("***** launching '%s' as 4k *****\n", vp->v_name);
506 vm_map_set_page_shift(map, FOURK_PAGE_SHIFT);
507 }
508 #endif /* PMAP_CREATE_FORCE_4K_PAGES */
509
510 #ifndef CONFIG_ENFORCE_SIGNED_CODE
511 /* This turns off faulting for executable pages, which allows
512 * to circumvent Code Signing Enforcement. The per process
513 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
514 * global flag.
515 */
516 if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) {
517 vm_map_disable_NX(map);
518 // TODO: Message Trace or log that this is happening
519 }
520 #endif
521
522 /* Forcibly disallow execution from data pages on even if the arch
523 * normally permits it. */
524 if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) {
525 vm_map_disallow_data_exec(map);
526 }
527
528 /*
529 * Compute a random offset for ASLR, and an independent random offset for dyld.
530 */
531 if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
532 vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
533 aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;
534
535 aslr_page_offset = random();
536 aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
537 aslr_page_offset <<= vm_map_page_shift(map);
538
539 dyld_aslr_page_offset = random();
540 dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
541 dyld_aslr_page_offset <<= vm_map_page_shift(map);
542
543 aslr_page_offset += aslr_section_offset;
544 }
545 if (vm_map_page_shift(map) < (int)PAGE_SHIFT) {
546 DEBUG4K_LOAD("slide=0x%llx dyld_slide=0x%llx\n", aslr_page_offset, dyld_aslr_page_offset);
547 }
548
549 if (!result) {
550 result = &myresult;
551 }
552
553 *result = load_result_null;
554
555 /*
556 * re-set the bitness on the load result since we cleared the load result above.
557 */
558 result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
559 result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
560
561 lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
562 0, aslr_page_offset, dyld_aslr_page_offset, result,
563 NULL, imgp);
564
565 if (lret != LOAD_SUCCESS) {
566 vm_map_deallocate(map); /* will lose pmap reference too */
567 return lret;
568 }
569
570 #if __x86_64__
571 /*
572 * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
573 */
574 if (!result->is_64bit_addr) {
575 enforce_hard_pagezero = FALSE;
576 }
577
578 /*
579 * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
580 * to the start address for "anywhere" memory allocations.
581 */
582 #define VM_MAP_HIGH_START_BITS_COUNT 8
583 #define VM_MAP_HIGH_START_BITS_SHIFT 27
584 if (result->is_64bit_addr &&
585 (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
586 int random_bits;
587 vm_map_offset_t high_start;
588
589 random_bits = random();
590 random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1;
591 high_start = (((vm_map_offset_t)random_bits)
592 << VM_MAP_HIGH_START_BITS_SHIFT);
593 vm_map_set_high_start(map, high_start);
594 }
595 #endif /* __x86_64__ */
596
597 /*
598 * Check to see if the page zero is enforced by the map->min_offset.
599 */
600 if (enforce_hard_pagezero &&
601 (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
602 #if __arm64__
603 if (
604 !result->is_64bit_addr && /* not 64-bit address space */
605 !(header->flags & MH_PIE) && /* not PIE */
606 (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
607 PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
608 result->has_pagezero && /* has a "soft" page zero */
609 fourk_binary_compatibility_unsafe) {
610 /*
611 * For backwards compatibility of "4K" apps on
612 * a 16K system, do not enforce a hard page zero...
613 */
614 } else
615 #endif /* __arm64__ */
616 {
617 vm_map_deallocate(map); /* will lose pmap reference too */
618 return LOAD_BADMACHO;
619 }
620 }
621
622 #if __arm64__
623 if (enforce_hard_pagezero && result->is_64bit_addr && (header->cputype == CPU_TYPE_ARM64)) {
624 /* 64 bit ARM binary must have "hard page zero" of 4GB to cover the lower 32 bit address space */
625 if (vm_map_has_hard_pagezero(map, 0x100000000) == FALSE) {
626 vm_map_deallocate(map); /* will lose pmap reference too */
627 return LOAD_BADMACHO;
628 }
629 }
630 #endif
631
632 vm_commit_pagezero_status(map);
633
634 /*
635 * If this is an exec, then we are going to destroy the old
636 * task, and it's correct to halt it; if it's spawn, the
637 * task is not yet running, and it makes no sense.
638 */
639 if (in_exec) {
640 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
641 /*
642 * Mark the task as halting and start the other
643 * threads towards terminating themselves. Then
644 * make sure any threads waiting for a process
645 * transition get informed that we are committed to
646 * this transition, and then finally complete the
647 * task halting (wait for threads and then cleanup
648 * task resources).
649 *
650 * NOTE: task_start_halt() makes sure that no new
651 * threads are created in the task during the transition.
652 * We need to mark the workqueue as exiting before we
653 * wait for threads to terminate (at the end of which
654 * we no longer have a prohibition on thread creation).
655 *
656 * Finally, clean up any lingering workqueue data structures
657 * that may have been left behind by the workqueue threads
658 * as they exited (and then clean up the work queue itself).
659 */
660 kret = task_start_halt(task);
661 if (kret != KERN_SUCCESS) {
662 vm_map_deallocate(map); /* will lose pmap reference too */
663 return LOAD_FAILURE;
664 }
665 proc_transcommit(p, 0);
666 workq_mark_exiting(p);
667 task_complete_halt(task);
668 workq_exit(p);
669
670 /*
671 * Roll up accounting info to new task. The roll up is done after
672 * task_complete_halt to make sure the thread accounting info is
673 * rolled up to current_task.
674 */
675 task_rollup_accounting_info(get_threadtask(thread), task);
676 }
677 *mapp = map;
678
679 #if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
680 /*
681 * arm64e plugin hosts currently run with JOP keys disabled, since they
682 * may need to run arm64 plugins.
683 */
684 if (arm64e_plugin_host(imgp, result)) {
685 imgp->ip_flags |= IMGPF_NOJOP;
686 pmap_disable_user_jop(pmap);
687 }
688 #endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
689
690 #ifdef CONFIG_32BIT_TELEMETRY
691 if (!result->is_64bit_data) {
692 /*
693 * This may not need to be an AST; we merely need to ensure that
694 * we gather telemetry at the point where all of the information
695 * that we want has been added to the process.
696 */
697 task_set_32bit_log_flag(get_threadtask(thread));
698 act_set_astbsd(thread);
699 }
700 #endif /* CONFIG_32BIT_TELEMETRY */
701
702 return LOAD_SUCCESS;
703 }
704
705 int macho_printf = 0;
706 #define MACHO_PRINTF(args) \
707 do { \
708 if (macho_printf) { \
709 printf args; \
710 } \
711 } while (0)
712
713
714 static boolean_t
715 pie_required(
716 cpu_type_t exectype,
717 cpu_subtype_t execsubtype)
718 {
719 switch (exectype) {
720 case CPU_TYPE_X86_64:
721 return FALSE;
722 case CPU_TYPE_ARM64:
723 return TRUE;
724 case CPU_TYPE_ARM:
725 switch (execsubtype) {
726 case CPU_SUBTYPE_ARM_V7K:
727 return TRUE;
728 }
729 break;
730 }
731 return FALSE;
732 }
733
734 /*
735 * The file size of a mach-o file is limited to 32 bits; this is because
736 * this is the limit on the kalloc() of enough bytes for a mach_header and
737 * the contents of its sizeofcmds, which is currently constrained to 32
738 * bits in the file format itself. We read into the kernel buffer the
739 * commands section, and then parse it in order to parse the mach-o file
740 * format load_command segment(s). We are only interested in a subset of
741 * the total set of possible commands. If "map"==VM_MAP_NULL or
742 * "thread"==THREAD_NULL, do not make permament VM modifications,
743 * just preflight the parse.
744 */
745 static
746 load_return_t
747 parse_machfile(
748 struct vnode *vp,
749 vm_map_t map,
750 thread_t thread,
751 struct mach_header *header,
752 off_t file_offset,
753 off_t macho_size,
754 int depth,
755 int64_t aslr_offset,
756 int64_t dyld_aslr_offset,
757 load_result_t *result,
758 load_result_t *binresult,
759 struct image_params *imgp
760 )
761 {
762 uint32_t ncmds;
763 struct load_command *lcp;
764 struct dylinker_command *dlp = 0;
765 void * control;
766 load_return_t ret = LOAD_SUCCESS;
767 void * addr;
768 vm_size_t alloc_size, cmds_size;
769 size_t offset;
770 size_t oldoffset; /* for overflow check */
771 int pass;
772 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
773 int error;
774 int resid = 0;
775 int spawn = (imgp->ip_flags & IMGPF_SPAWN);
776 int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
777 size_t mach_header_sz = sizeof(struct mach_header);
778 boolean_t abi64;
779 boolean_t got_code_signatures = FALSE;
780 boolean_t found_header_segment = FALSE;
781 boolean_t found_xhdr = FALSE;
782 boolean_t found_version_cmd = FALSE;
783 int64_t slide = 0;
784 boolean_t dyld_no_load_addr = FALSE;
785 boolean_t is_dyld = FALSE;
786 vm_map_offset_t effective_page_mask = PAGE_MASK;
787 #if __arm64__
788 uint64_t pagezero_end = 0;
789 uint64_t executable_end = 0;
790 uint64_t writable_start = 0;
791 vm_map_size_t effective_page_size;
792
793 effective_page_mask = vm_map_page_mask(map);
794 effective_page_size = vm_map_page_size(map);
795 #endif /* __arm64__ */
796
797 if (header->magic == MH_MAGIC_64 ||
798 header->magic == MH_CIGAM_64) {
799 mach_header_sz = sizeof(struct mach_header_64);
800 }
801
802 /*
803 * Break infinite recursion
804 */
805 if (depth > 2) {
806 return LOAD_FAILURE;
807 }
808
809 depth++;
810
811 /*
812 * Check to see if right machine type.
813 */
814 if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
815 ) {
816 return LOAD_BADARCH;
817 }
818
819 if (!grade_binary(header->cputype,
820 header->cpusubtype & ~CPU_SUBTYPE_MASK,
821 header->cpusubtype & CPU_SUBTYPE_MASK, TRUE)) {
822 return LOAD_BADARCH;
823 }
824
825 abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
826
827 switch (header->filetype) {
828 case MH_EXECUTE:
829 if (depth != 1 && depth != 3) {
830 return LOAD_FAILURE;
831 }
832 if (header->flags & MH_DYLDLINK) {
833 /* Check properties of dynamic executables */
834 if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
835 return LOAD_FAILURE;
836 }
837 result->needs_dynlinker = TRUE;
838 } else if (header->cputype == CPU_TYPE_X86_64) {
839 /* x86_64 static binaries allowed */
840 } else {
841 /* Check properties of static executables (disallowed except for development) */
842 #if !(DEVELOPMENT || DEBUG)
843 return LOAD_FAILURE;
844 #endif
845 }
846 break;
847 case MH_DYLINKER:
848 if (depth != 2) {
849 return LOAD_FAILURE;
850 }
851 is_dyld = TRUE;
852 break;
853
854 default:
855 return LOAD_FAILURE;
856 }
857
858 /*
859 * For PIE and dyld, slide everything by the ASLR offset.
860 */
861 if ((header->flags & MH_PIE) || is_dyld) {
862 slide = aslr_offset;
863 }
864
865 /*
866 * Get the pager for the file.
867 */
868 control = ubc_getobject(vp, UBC_FLAGS_NONE);
869
870 /* ensure header + sizeofcmds falls within the file */
871 if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
872 (off_t)cmds_size > macho_size ||
873 round_page_overflow(cmds_size, &alloc_size) ||
874 alloc_size > INT_MAX) {
875 return LOAD_BADMACHO;
876 }
877
878 /*
879 * Map the load commands into kernel memory.
880 */
881 addr = kalloc(alloc_size);
882 if (addr == NULL) {
883 return LOAD_NOSPACE;
884 }
885
886 error = vn_rdwr(UIO_READ, vp, addr, (int)alloc_size, file_offset,
887 UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p);
888 if (error) {
889 kfree(addr, alloc_size);
890 return LOAD_IOERROR;
891 }
892
893 if (resid) {
894 {
895 /* We must be able to read in as much as the mach_header indicated */
896 kfree(addr, alloc_size);
897 return LOAD_BADMACHO;
898 }
899 }
900
901 /*
902 * Scan through the commands, processing each one as necessary.
903 * We parse in three passes through the headers:
904 * 0: determine if TEXT and DATA boundary can be page-aligned, load platform version
905 * 1: thread state, uuid, code signature
906 * 2: segments
907 * 3: dyld, encryption, check entry point
908 */
909
910 boolean_t slide_realign = FALSE;
911 #if __arm64__
912 if (!abi64) {
913 slide_realign = TRUE;
914 }
915 #endif
916
917 for (pass = 0; pass <= 3; pass++) {
918 if (pass == 1) {
919 #if __arm64__
920 boolean_t is_pie;
921 int64_t adjust;
922
923 is_pie = ((header->flags & MH_PIE) != 0);
924 if (pagezero_end != 0 &&
925 pagezero_end < effective_page_size) {
926 /* need at least 1 page for PAGEZERO */
927 adjust = effective_page_size;
928 MACHO_PRINTF(("pagezero boundary at "
929 "0x%llx; adjust slide from "
930 "0x%llx to 0x%llx%s\n",
931 (uint64_t) pagezero_end,
932 slide,
933 slide + adjust,
934 (is_pie
935 ? ""
936 : " BUT NO PIE ****** :-(")));
937 if (is_pie) {
938 slide += adjust;
939 pagezero_end += adjust;
940 executable_end += adjust;
941 writable_start += adjust;
942 }
943 }
944 if (pagezero_end != 0) {
945 result->has_pagezero = TRUE;
946 }
947 if (executable_end == writable_start &&
948 (executable_end & effective_page_mask) != 0 &&
949 (executable_end & FOURK_PAGE_MASK) == 0) {
950 /*
951 * The TEXT/DATA boundary is 4K-aligned but
952 * not page-aligned. Adjust the slide to make
953 * it page-aligned and avoid having a page
954 * with both write and execute permissions.
955 */
956 adjust =
957 (effective_page_size -
958 (executable_end & effective_page_mask));
959 MACHO_PRINTF(("page-unaligned X-W boundary at "
960 "0x%llx; adjust slide from "
961 "0x%llx to 0x%llx%s\n",
962 (uint64_t) executable_end,
963 slide,
964 slide + adjust,
965 (is_pie
966 ? ""
967 : " BUT NO PIE ****** :-(")));
968 if (is_pie) {
969 slide += adjust;
970 }
971 }
972 #endif /* __arm64__ */
973
974 if (dyld_no_load_addr && binresult) {
975 /*
976 * The dyld Mach-O does not specify a load address. Try to locate
977 * it right after the main binary. If binresult == NULL, load
978 * directly to the given slide.
979 */
980 mach_vm_address_t max_vm_addr = binresult->max_vm_addr;
981 slide = vm_map_round_page(slide + max_vm_addr, effective_page_mask);
982 }
983 }
984
985 /*
986 * Check that the entry point is contained in an executable segment
987 */
988 if ((pass == 3) && (thread != THREAD_NULL)) {
989 if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) {
990 /* Driver binaries must have driverkit platform */
991 if (result->ip_platform == PLATFORM_DRIVERKIT) {
992 /* Driver binaries have no entry point */
993 ret = setup_driver_main(thread, slide, result);
994 } else {
995 ret = LOAD_FAILURE;
996 }
997 } else if (!result->using_lcmain && result->validentry == 0) {
998 ret = LOAD_FAILURE;
999 }
1000 if (ret != KERN_SUCCESS) {
1001 thread_state_initialize(thread);
1002 break;
1003 }
1004 }
1005
1006 /*
1007 * Check that some segment maps the start of the mach-o file, which is
1008 * needed by the dynamic loader to read the mach headers, etc.
1009 */
1010 if ((pass == 3) && (found_header_segment == FALSE)) {
1011 ret = LOAD_BADMACHO;
1012 break;
1013 }
1014
1015 /*
1016 * Loop through each of the load_commands indicated by the
1017 * Mach-O header; if an absurd value is provided, we just
1018 * run off the end of the reserved section by incrementing
1019 * the offset too far, so we are implicitly fail-safe.
1020 */
1021 offset = mach_header_sz;
1022 ncmds = header->ncmds;
1023
1024 while (ncmds--) {
1025 /* ensure enough space for a minimal load command */
1026 if (offset + sizeof(struct load_command) > cmds_size) {
1027 ret = LOAD_BADMACHO;
1028 break;
1029 }
1030
1031 /*
1032 * Get a pointer to the command.
1033 */
1034 lcp = (struct load_command *)(addr + offset);
1035 oldoffset = offset;
1036
1037 /*
1038 * Perform prevalidation of the struct load_command
1039 * before we attempt to use its contents. Invalid
1040 * values are ones which result in an overflow, or
1041 * which can not possibly be valid commands, or which
1042 * straddle or exist past the reserved section at the
1043 * start of the image.
1044 */
1045 if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
1046 lcp->cmdsize < sizeof(struct load_command) ||
1047 offset > cmds_size) {
1048 ret = LOAD_BADMACHO;
1049 break;
1050 }
1051
1052 /*
1053 * Act on struct load_command's for which kernel
1054 * intervention is required.
1055 * Note that each load command implementation is expected to validate
1056 * that lcp->cmdsize is large enough to fit its specific struct type
1057 * before dereferencing fields not covered by struct load_command.
1058 */
1059 switch (lcp->cmd) {
1060 case LC_SEGMENT: {
1061 struct segment_command *scp = (struct segment_command *) lcp;
1062 if (scp->cmdsize < sizeof(*scp)) {
1063 ret = LOAD_BADMACHO;
1064 break;
1065 }
1066 if (pass == 0) {
1067 if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
1068 dyld_no_load_addr = TRUE;
1069 if (!slide_realign) {
1070 /* got what we need, bail early on pass 0 */
1071 continue;
1072 }
1073 }
1074
1075 #if __arm64__
1076 assert(!abi64);
1077
1078 if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
1079 /* PAGEZERO */
1080 if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end) || pagezero_end > UINT32_MAX) {
1081 ret = LOAD_BADMACHO;
1082 break;
1083 }
1084 }
1085 if (scp->initprot & VM_PROT_EXECUTE) {
1086 /* TEXT */
1087 if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end) || executable_end > UINT32_MAX) {
1088 ret = LOAD_BADMACHO;
1089 break;
1090 }
1091 }
1092 if (scp->initprot & VM_PROT_WRITE) {
1093 /* DATA */
1094 if (os_add_overflow(scp->vmaddr, slide, &writable_start) || writable_start > UINT32_MAX) {
1095 ret = LOAD_BADMACHO;
1096 break;
1097 }
1098 }
1099 #endif /* __arm64__ */
1100 break;
1101 }
1102
1103 if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
1104 found_xhdr = TRUE;
1105 }
1106
1107 if (pass != 2) {
1108 break;
1109 }
1110
1111 if (abi64) {
1112 /*
1113 * Having an LC_SEGMENT command for the
1114 * wrong ABI is invalid <rdar://problem/11021230>
1115 */
1116 ret = LOAD_BADMACHO;
1117 break;
1118 }
1119
1120 ret = load_segment(lcp,
1121 header->filetype,
1122 control,
1123 file_offset,
1124 macho_size,
1125 vp,
1126 map,
1127 slide,
1128 result,
1129 imgp);
1130 if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
1131 /* Enforce a single segment mapping offset zero, with R+X
1132 * protection. */
1133 if (found_header_segment ||
1134 ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
1135 ret = LOAD_BADMACHO;
1136 break;
1137 }
1138 found_header_segment = TRUE;
1139 }
1140
1141 break;
1142 }
1143 case LC_SEGMENT_64: {
1144 struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
1145 if (scp64->cmdsize < sizeof(*scp64)) {
1146 ret = LOAD_BADMACHO;
1147 break;
1148 }
1149 if (pass == 0) {
1150 if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
1151 dyld_no_load_addr = TRUE;
1152 }
1153 /* got what we need, bail early on pass 0 */
1154 continue;
1155 }
1156
1157 if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
1158 found_xhdr = TRUE;
1159 }
1160
1161 if (pass != 2) {
1162 break;
1163 }
1164
1165 if (!abi64) {
1166 /*
1167 * Having an LC_SEGMENT_64 command for the
1168 * wrong ABI is invalid <rdar://problem/11021230>
1169 */
1170 ret = LOAD_BADMACHO;
1171 break;
1172 }
1173
1174 ret = load_segment(lcp,
1175 header->filetype,
1176 control,
1177 file_offset,
1178 macho_size,
1179 vp,
1180 map,
1181 slide,
1182 result,
1183 imgp);
1184
1185 if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
1186 /* Enforce a single segment mapping offset zero, with R+X
1187 * protection. */
1188 if (found_header_segment ||
1189 ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
1190 ret = LOAD_BADMACHO;
1191 break;
1192 }
1193 found_header_segment = TRUE;
1194 }
1195
1196 break;
1197 }
1198 case LC_UNIXTHREAD: {
1199 boolean_t is_x86_64_compat_binary = FALSE;
1200 if (pass != 1) {
1201 break;
1202 }
1203 ret = load_unixthread(
1204 (struct thread_command *) lcp,
1205 thread,
1206 slide,
1207 is_x86_64_compat_binary,
1208 result);
1209 break;
1210 }
1211 case LC_MAIN:
1212 if (pass != 1) {
1213 break;
1214 }
1215 if (depth != 1) {
1216 break;
1217 }
1218 ret = load_main(
1219 (struct entry_point_command *) lcp,
1220 thread,
1221 slide,
1222 result);
1223 break;
1224 case LC_LOAD_DYLINKER:
1225 if (pass != 3) {
1226 break;
1227 }
1228 if ((depth == 1) && (dlp == 0)) {
1229 dlp = (struct dylinker_command *)lcp;
1230 } else {
1231 ret = LOAD_FAILURE;
1232 }
1233 break;
1234 case LC_UUID:
1235 if (pass == 1 && depth == 1) {
1236 ret = load_uuid((struct uuid_command *) lcp,
1237 (char *)addr + cmds_size,
1238 result);
1239 }
1240 break;
1241 case LC_CODE_SIGNATURE:
1242 /* CODE SIGNING */
1243 if (pass != 1) {
1244 break;
1245 }
1246
1247 /* pager -> uip ->
1248 * load signatures & store in uip
1249 * set VM object "signed_pages"
1250 */
1251 ret = load_code_signature(
1252 (struct linkedit_data_command *) lcp,
1253 vp,
1254 file_offset,
1255 macho_size,
1256 header->cputype,
1257 header->cpusubtype,
1258 result,
1259 imgp);
1260 if (ret != LOAD_SUCCESS) {
1261 printf("proc %d: load code signature error %d "
1262 "for file \"%s\"\n",
1263 p->p_pid, ret, vp->v_name);
1264 /*
1265 * Allow injections to be ignored on devices w/o enforcement enabled
1266 */
1267 if (!cs_process_global_enforcement()) {
1268 ret = LOAD_SUCCESS; /* ignore error */
1269 }
1270 } else {
1271 got_code_signatures = TRUE;
1272 }
1273
1274 if (got_code_signatures) {
1275 unsigned tainted = CS_VALIDATE_TAINTED;
1276 boolean_t valid = FALSE;
1277 vm_size_t off = 0;
1278
1279
1280 if (cs_debug > 10) {
1281 printf("validating initial pages of %s\n", vp->v_name);
1282 }
1283
1284 while (off < alloc_size && ret == LOAD_SUCCESS) {
1285 tainted = CS_VALIDATE_TAINTED;
1286
1287 valid = cs_validate_range(vp,
1288 NULL,
1289 file_offset + off,
1290 addr + off,
1291 MIN(PAGE_SIZE, cmds_size),
1292 &tainted);
1293 if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
1294 if (cs_debug) {
1295 printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
1296 vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
1297 }
1298 if (cs_process_global_enforcement() ||
1299 (result->csflags & (CS_HARD | CS_KILL | CS_ENFORCEMENT))) {
1300 ret = LOAD_FAILURE;
1301 }
1302 result->csflags &= ~CS_VALID;
1303 }
1304 off += PAGE_SIZE;
1305 }
1306 }
1307
1308 break;
1309 #if CONFIG_CODE_DECRYPTION
1310 case LC_ENCRYPTION_INFO:
1311 case LC_ENCRYPTION_INFO_64:
1312 if (pass != 3) {
1313 break;
1314 }
1315 ret = set_code_unprotect(
1316 (struct encryption_info_command *) lcp,
1317 addr, map, slide, vp, file_offset,
1318 header->cputype, header->cpusubtype);
1319 if (ret != LOAD_SUCCESS) {
1320 os_reason_t load_failure_reason = OS_REASON_NULL;
1321 printf("proc %d: set_code_unprotect() error %d "
1322 "for file \"%s\"\n",
1323 p->p_pid, ret, vp->v_name);
1324 /*
1325 * Don't let the app run if it's
1326 * encrypted but we failed to set up the
1327 * decrypter. If the keys are missing it will
1328 * return LOAD_DECRYPTFAIL.
1329 */
1330 if (ret == LOAD_DECRYPTFAIL) {
1331 /* failed to load due to missing FP keys */
1332 proc_lock(p);
1333 p->p_lflag |= P_LTERM_DECRYPTFAIL;
1334 proc_unlock(p);
1335
1336 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1337 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
1338 load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
1339 } else {
1340 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1341 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
1342 load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
1343 }
1344
1345 /*
1346 * Don't signal the process if it was forked and in a partially constructed
1347 * state as part of a spawn -- it will just be torn down when the exec fails.
1348 */
1349 if (!spawn) {
1350 assert(load_failure_reason != OS_REASON_NULL);
1351 if (vfexec) {
1352 psignal_vfork_with_reason(p, get_threadtask(imgp->ip_new_thread), imgp->ip_new_thread, SIGKILL, load_failure_reason);
1353 load_failure_reason = OS_REASON_NULL;
1354 } else {
1355 psignal_with_reason(p, SIGKILL, load_failure_reason);
1356 load_failure_reason = OS_REASON_NULL;
1357 }
1358 } else {
1359 os_reason_free(load_failure_reason);
1360 load_failure_reason = OS_REASON_NULL;
1361 }
1362 }
1363 break;
1364 #endif
1365 case LC_VERSION_MIN_IPHONEOS:
1366 case LC_VERSION_MIN_MACOSX:
1367 case LC_VERSION_MIN_WATCHOS:
1368 case LC_VERSION_MIN_TVOS: {
1369 struct version_min_command *vmc;
1370
1371 if (depth != 1 || pass != 0) {
1372 break;
1373 }
1374 vmc = (struct version_min_command *) lcp;
1375 ret = load_version(vmc, &found_version_cmd, imgp->ip_flags, result);
1376 break;
1377 }
1378 case LC_BUILD_VERSION: {
1379 if (depth != 1 || pass != 0) {
1380 break;
1381 }
1382 struct build_version_command* bvc = (struct build_version_command*)lcp;
1383 if (bvc->cmdsize < sizeof(*bvc)) {
1384 ret = LOAD_BADMACHO;
1385 break;
1386 }
1387 if (found_version_cmd == TRUE) {
1388 ret = LOAD_BADMACHO;
1389 break;
1390 }
1391 result->ip_platform = bvc->platform;
1392 result->lr_sdk = bvc->sdk;
1393 found_version_cmd = TRUE;
1394 break;
1395 }
1396 default:
1397 /* Other commands are ignored by the kernel */
1398 ret = LOAD_SUCCESS;
1399 break;
1400 }
1401 if (ret != LOAD_SUCCESS) {
1402 break;
1403 }
1404 }
1405 if (ret != LOAD_SUCCESS) {
1406 break;
1407 }
1408 }
1409
1410 if (ret == LOAD_SUCCESS) {
1411 if (!got_code_signatures && cs_process_global_enforcement()) {
1412 ret = LOAD_FAILURE;
1413 }
1414
1415 /* Make sure if we need dyld, we got it */
1416 if (result->needs_dynlinker && !dlp) {
1417 ret = LOAD_FAILURE;
1418 }
1419
1420 if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
1421 /*
1422 * load the dylinker, and slide it by the independent DYLD ASLR
1423 * offset regardless of the PIE-ness of the main binary.
1424 */
1425 ret = load_dylinker(dlp, header->cputype, map, thread, depth,
1426 dyld_aslr_offset, result, imgp);
1427 }
1428
1429
1430 if ((ret == LOAD_SUCCESS) && (depth == 1)) {
1431 if (result->thread_count == 0) {
1432 ret = LOAD_FAILURE;
1433 }
1434 #if CONFIG_ENFORCE_SIGNED_CODE
1435 if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
1436 ret = LOAD_FAILURE;
1437 }
1438 #endif
1439 }
1440 }
1441
1442 if (ret == LOAD_BADMACHO && found_xhdr) {
1443 ret = LOAD_BADMACHO_UPX;
1444 }
1445
1446 kfree(addr, alloc_size);
1447
1448 return ret;
1449 }
1450
1451 load_return_t
1452 validate_potential_simulator_binary(
1453 cpu_type_t exectype __unused,
1454 struct image_params *imgp __unused,
1455 off_t file_offset __unused,
1456 off_t macho_size __unused)
1457 {
1458 #if __x86_64__
1459 /* Allow 32 bit exec only for simulator binaries */
1460 if (bootarg_no32exec && imgp != NULL && exectype == CPU_TYPE_X86) {
1461 if (imgp->ip_simulator_binary == IMGPF_SB_DEFAULT) {
1462 boolean_t simulator_binary = check_if_simulator_binary(imgp, file_offset, macho_size);
1463 imgp->ip_simulator_binary = simulator_binary ? IMGPF_SB_TRUE : IMGPF_SB_FALSE;
1464 }
1465
1466 if (imgp->ip_simulator_binary != IMGPF_SB_TRUE) {
1467 return LOAD_BADARCH;
1468 }
1469 }
1470 #endif
1471 return LOAD_SUCCESS;
1472 }
1473
1474 #if __x86_64__
1475 static boolean_t
1476 check_if_simulator_binary(
1477 struct image_params *imgp,
1478 off_t file_offset,
1479 off_t macho_size)
1480 {
1481 struct mach_header *header;
1482 char *ip_vdata = NULL;
1483 kauth_cred_t cred = NULL;
1484 uint32_t ncmds;
1485 struct load_command *lcp;
1486 boolean_t simulator_binary = FALSE;
1487 void * addr = NULL;
1488 vm_size_t alloc_size, cmds_size;
1489 size_t offset;
1490 proc_t p = current_proc(); /* XXXX */
1491 int error;
1492 int resid = 0;
1493 size_t mach_header_sz = sizeof(struct mach_header);
1494
1495
1496 cred = kauth_cred_proc_ref(p);
1497
1498 /* Allocate page to copyin mach header */
1499 ip_vdata = kalloc(PAGE_SIZE);
1500 bzero(ip_vdata, PAGE_SIZE);
1501 if (ip_vdata == NULL) {
1502 goto bad;
1503 }
1504
1505 /* Read the Mach-O header */
1506 error = vn_rdwr(UIO_READ, imgp->ip_vp, ip_vdata,
1507 PAGE_SIZE, file_offset,
1508 UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
1509 cred, &resid, p);
1510 if (error) {
1511 goto bad;
1512 }
1513
1514 header = (struct mach_header *)ip_vdata;
1515
1516 if (header->magic == MH_MAGIC_64 ||
1517 header->magic == MH_CIGAM_64) {
1518 mach_header_sz = sizeof(struct mach_header_64);
1519 }
1520
1521 /* ensure header + sizeofcmds falls within the file */
1522 if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
1523 (off_t)cmds_size > macho_size ||
1524 round_page_overflow(cmds_size, &alloc_size) ||
1525 alloc_size > INT_MAX) {
1526 goto bad;
1527 }
1528
1529 /*
1530 * Map the load commands into kernel memory.
1531 */
1532 addr = kalloc(alloc_size);
1533 if (addr == NULL) {
1534 goto bad;
1535 }
1536
1537 error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, (int)alloc_size, file_offset,
1538 UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
1539 if (error) {
1540 goto bad;
1541 }
1542
1543 if (resid) {
1544 /* We must be able to read in as much as the mach_header indicated */
1545 goto bad;
1546 }
1547
1548 /*
1549 * Loop through each of the load_commands indicated by the
1550 * Mach-O header; if an absurd value is provided, we just
1551 * run off the end of the reserved section by incrementing
1552 * the offset too far, so we are implicitly fail-safe.
1553 */
1554 offset = mach_header_sz;
1555 ncmds = header->ncmds;
1556
1557 while (ncmds--) {
1558 /* ensure enough space for a minimal load command */
1559 if (offset + sizeof(struct load_command) > cmds_size) {
1560 break;
1561 }
1562
1563 /*
1564 * Get a pointer to the command.
1565 */
1566 lcp = (struct load_command *)(addr + offset);
1567
1568 /*
1569 * Perform prevalidation of the struct load_command
1570 * before we attempt to use its contents. Invalid
1571 * values are ones which result in an overflow, or
1572 * which can not possibly be valid commands, or which
1573 * straddle or exist past the reserved section at the
1574 * start of the image.
1575 */
1576 if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
1577 lcp->cmdsize < sizeof(struct load_command) ||
1578 offset > cmds_size) {
1579 break;
1580 }
1581
1582 /* Check if its a simulator binary. */
1583 switch (lcp->cmd) {
1584 case LC_VERSION_MIN_WATCHOS:
1585 simulator_binary = TRUE;
1586 break;
1587
1588 case LC_BUILD_VERSION: {
1589 struct build_version_command *bvc;
1590
1591 bvc = (struct build_version_command *) lcp;
1592 if (bvc->cmdsize < sizeof(*bvc)) {
1593 /* unsafe to use this command struct if cmdsize
1594 * validated above is too small for it to fit */
1595 break;
1596 }
1597 if (bvc->platform == PLATFORM_IOSSIMULATOR ||
1598 bvc->platform == PLATFORM_WATCHOSSIMULATOR) {
1599 simulator_binary = TRUE;
1600 }
1601
1602 break;
1603 }
1604
1605 case LC_VERSION_MIN_IPHONEOS: {
1606 simulator_binary = TRUE;
1607 break;
1608 }
1609
1610 default:
1611 /* ignore other load commands */
1612 break;
1613 }
1614
1615 if (simulator_binary == TRUE) {
1616 break;
1617 }
1618 }
1619
1620 bad:
1621 if (ip_vdata) {
1622 kfree(ip_vdata, PAGE_SIZE);
1623 }
1624
1625 if (cred) {
1626 kauth_cred_unref(&cred);
1627 }
1628
1629 if (addr) {
1630 kfree(addr, alloc_size);
1631 }
1632
1633 return simulator_binary;
1634 }
1635 #endif /* __x86_64__ */
1636
1637 #if CONFIG_CODE_DECRYPTION
1638
1639 #define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
1640
1641 static load_return_t
1642 unprotect_dsmos_segment(
1643 uint64_t file_off,
1644 uint64_t file_size,
1645 struct vnode *vp,
1646 off_t macho_offset,
1647 vm_map_t map,
1648 vm_map_offset_t map_addr,
1649 vm_map_size_t map_size)
1650 {
1651 kern_return_t kr;
1652 uint64_t slice_off;
1653
1654 /*
1655 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
1656 * this part of a Universal binary) are not protected...
1657 * The rest needs to be "transformed".
1658 */
1659 slice_off = file_off - macho_offset;
1660 if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
1661 slice_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
1662 /* it's all unprotected, nothing to do... */
1663 kr = KERN_SUCCESS;
1664 } else {
1665 if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
1666 /*
1667 * We start mapping in the unprotected area.
1668 * Skip the unprotected part...
1669 */
1670 uint64_t delta_file;
1671 vm_map_offset_t delta_map;
1672
1673 delta_file = (uint64_t)APPLE_UNPROTECTED_HEADER_SIZE;
1674 delta_file -= slice_off;
1675 if (os_convert_overflow(delta_file, &delta_map)) {
1676 return LOAD_BADMACHO;
1677 }
1678 if (os_add_overflow(map_addr, delta_map, &map_addr)) {
1679 return LOAD_BADMACHO;
1680 }
1681 if (os_sub_overflow(map_size, delta_map, &map_size)) {
1682 return LOAD_BADMACHO;
1683 }
1684 }
1685 /* ... transform the rest of the mapping. */
1686 struct pager_crypt_info crypt_info;
1687 crypt_info.page_decrypt = dsmos_page_transform;
1688 crypt_info.crypt_ops = NULL;
1689 crypt_info.crypt_end = NULL;
1690 #pragma unused(vp, macho_offset)
1691 crypt_info.crypt_ops = (void *)0x2e69cf40;
1692 vm_map_offset_t crypto_backing_offset;
1693 crypto_backing_offset = -1; /* i.e. use map entry's offset */
1694 #if VM_MAP_DEBUG_APPLE_PROTECT
1695 if (vm_map_debug_apple_protect) {
1696 struct proc *p;
1697 p = current_proc();
1698 printf("APPLE_PROTECT: %d[%s] map %p "
1699 "[0x%llx:0x%llx] %s(%s)\n",
1700 p->p_pid, p->p_comm, map,
1701 (uint64_t) map_addr,
1702 (uint64_t) (map_addr + map_size),
1703 __FUNCTION__, vp->v_name);
1704 }
1705 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
1706
1707 /* The DSMOS pager can only be used by apple signed code */
1708 struct cs_blob * blob = csvnode_get_blob(vp, file_off);
1709 if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
1710 return LOAD_FAILURE;
1711 }
1712
1713 kr = vm_map_apple_protected(map,
1714 map_addr,
1715 map_addr + map_size,
1716 crypto_backing_offset,
1717 &crypt_info,
1718 CRYPTID_APP_ENCRYPTION);
1719 }
1720
1721 if (kr != KERN_SUCCESS) {
1722 return LOAD_FAILURE;
1723 }
1724 return LOAD_SUCCESS;
1725 }
1726 #else /* CONFIG_CODE_DECRYPTION */
1727 static load_return_t
1728 unprotect_dsmos_segment(
1729 __unused uint64_t file_off,
1730 __unused uint64_t file_size,
1731 __unused struct vnode *vp,
1732 __unused off_t macho_offset,
1733 __unused vm_map_t map,
1734 __unused vm_map_offset_t map_addr,
1735 __unused vm_map_size_t map_size)
1736 {
1737 return LOAD_SUCCESS;
1738 }
1739 #endif /* CONFIG_CODE_DECRYPTION */
1740
1741
1742 /*
1743 * map_segment:
1744 * Maps a Mach-O segment, taking care of mis-alignment (wrt the system
1745 * page size) issues.
1746 *
1747 * The mapping might result in 1, 2 or 3 map entries:
1748 * 1. for the first page, which could be overlap with the previous
1749 * mapping,
1750 * 2. for the center (if applicable),
1751 * 3. for the last page, which could overlap with the next mapping.
1752 *
1753 * For each of those map entries, we might have to interpose a
1754 * "fourk_pager" to deal with mis-alignment wrt the system page size,
1755 * either in the mapping address and/or size or the file offset and/or
1756 * size.
1757 * The "fourk_pager" itself would be mapped with proper alignment
1758 * wrt the system page size and would then be populated with the
1759 * information about the intended mapping, with a "4KB" granularity.
1760 */
1761 static kern_return_t
1762 map_segment(
1763 vm_map_t map,
1764 vm_map_offset_t vm_start,
1765 vm_map_offset_t vm_end,
1766 memory_object_control_t control,
1767 vm_map_offset_t file_start,
1768 vm_map_offset_t file_end,
1769 vm_prot_t initprot,
1770 vm_prot_t maxprot,
1771 load_result_t *result)
1772 {
1773 vm_map_offset_t cur_offset, cur_start, cur_end;
1774 kern_return_t ret;
1775 vm_map_offset_t effective_page_mask;
1776 vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;
1777
1778 if (vm_end < vm_start ||
1779 file_end < file_start) {
1780 return LOAD_BADMACHO;
1781 }
1782 if (vm_end == vm_start ||
1783 file_end == file_start) {
1784 /* nothing to map... */
1785 return LOAD_SUCCESS;
1786 }
1787
1788 effective_page_mask = vm_map_page_mask(map);
1789
1790 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1791 if (vm_map_page_aligned(vm_start, effective_page_mask) &&
1792 vm_map_page_aligned(vm_end, effective_page_mask) &&
1793 vm_map_page_aligned(file_start, effective_page_mask) &&
1794 vm_map_page_aligned(file_end, effective_page_mask)) {
1795 /* all page-aligned and map-aligned: proceed */
1796 } else {
1797 #if __arm64__
1798 /* use an intermediate "4K" pager */
1799 vmk_flags.vmkf_fourk = TRUE;
1800 #else /* __arm64__ */
1801 panic("map_segment: unexpected mis-alignment "
1802 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
1803 (uint64_t) vm_start,
1804 (uint64_t) vm_end,
1805 (uint64_t) file_start,
1806 (uint64_t) file_end);
1807 #endif /* __arm64__ */
1808 }
1809
1810 cur_offset = 0;
1811 cur_start = vm_start;
1812 cur_end = vm_start;
1813 #if __arm64__
1814 if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
1815 /* one 4K pager for the 1st page */
1816 cur_end = vm_map_round_page(cur_start, effective_page_mask);
1817 if (cur_end > vm_end) {
1818 cur_end = vm_start + (file_end - file_start);
1819 }
1820 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1821 /* no copy-on-read for mapped binaries */
1822 vmk_flags.vmkf_no_copy_on_read = 1;
1823 ret = vm_map_enter_mem_object_control(
1824 map,
1825 &cur_start,
1826 cur_end - cur_start,
1827 (mach_vm_offset_t)0,
1828 VM_FLAGS_FIXED,
1829 vmk_flags,
1830 VM_KERN_MEMORY_NONE,
1831 control,
1832 file_start + cur_offset,
1833 TRUE, /* copy */
1834 initprot, maxprot,
1835 VM_INHERIT_DEFAULT);
1836 } else {
1837 ret = vm_map_enter_mem_object(
1838 map,
1839 &cur_start,
1840 cur_end - cur_start,
1841 (mach_vm_offset_t)0,
1842 VM_FLAGS_FIXED,
1843 vmk_flags,
1844 VM_KERN_MEMORY_NONE,
1845 IPC_PORT_NULL,
1846 0, /* offset */
1847 TRUE, /* copy */
1848 initprot, maxprot,
1849 VM_INHERIT_DEFAULT);
1850 }
1851 if (ret != KERN_SUCCESS) {
1852 return LOAD_NOSPACE;
1853 }
1854 cur_offset += cur_end - cur_start;
1855 }
1856 #endif /* __arm64__ */
1857 if (cur_end >= vm_start + (file_end - file_start)) {
1858 /* all mapped: done */
1859 goto done;
1860 }
1861 if (vm_map_round_page(cur_end, effective_page_mask) >=
1862 vm_map_trunc_page(vm_start + (file_end - file_start),
1863 effective_page_mask)) {
1864 /* no middle */
1865 } else {
1866 cur_start = cur_end;
1867 if ((vm_start & effective_page_mask) !=
1868 (file_start & effective_page_mask)) {
1869 /* one 4K pager for the middle */
1870 cur_vmk_flags = vmk_flags;
1871 } else {
1872 /* regular mapping for the middle */
1873 cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1874 }
1875
1876 #if !defined(XNU_TARGET_OS_OSX)
1877 (void) result;
1878 #else /* !defined(XNU_TARGET_OS_OSX) */
1879 /*
1880 * This process doesn't have its new csflags (from
1881 * the image being loaded) yet, so tell VM to override the
1882 * current process's CS_ENFORCEMENT for this mapping.
1883 */
1884 if (result->csflags & CS_ENFORCEMENT) {
1885 cur_vmk_flags.vmkf_cs_enforcement = TRUE;
1886 } else {
1887 cur_vmk_flags.vmkf_cs_enforcement = FALSE;
1888 }
1889 cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
1890 #endif /* !defined(XNU_TARGET_OS_OSX) */
1891
1892 if (result->is_cambria && (initprot & VM_PROT_EXECUTE) == VM_PROT_EXECUTE) {
1893 cur_vmk_flags.vmkf_translated_allow_execute = TRUE;
1894 }
1895
1896 cur_end = vm_map_trunc_page(vm_start + (file_end -
1897 file_start),
1898 effective_page_mask);
1899 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1900 /* no copy-on-read for mapped binaries */
1901 cur_vmk_flags.vmkf_no_copy_on_read = 1;
1902 ret = vm_map_enter_mem_object_control(
1903 map,
1904 &cur_start,
1905 cur_end - cur_start,
1906 (mach_vm_offset_t)0,
1907 VM_FLAGS_FIXED,
1908 cur_vmk_flags,
1909 VM_KERN_MEMORY_NONE,
1910 control,
1911 file_start + cur_offset,
1912 TRUE, /* copy */
1913 initprot, maxprot,
1914 VM_INHERIT_DEFAULT);
1915 } else {
1916 ret = vm_map_enter_mem_object(
1917 map,
1918 &cur_start,
1919 cur_end - cur_start,
1920 (mach_vm_offset_t)0,
1921 VM_FLAGS_FIXED,
1922 cur_vmk_flags,
1923 VM_KERN_MEMORY_NONE,
1924 IPC_PORT_NULL,
1925 0, /* offset */
1926 TRUE, /* copy */
1927 initprot, maxprot,
1928 VM_INHERIT_DEFAULT);
1929 }
1930 if (ret != KERN_SUCCESS) {
1931 return LOAD_NOSPACE;
1932 }
1933 cur_offset += cur_end - cur_start;
1934 }
1935 if (cur_end >= vm_start + (file_end - file_start)) {
1936 /* all mapped: done */
1937 goto done;
1938 }
1939 cur_start = cur_end;
1940 #if __arm64__
1941 if (!vm_map_page_aligned(vm_start + (file_end - file_start),
1942 effective_page_mask)) {
1943 /* one 4K pager for the last page */
1944 cur_end = vm_start + (file_end - file_start);
1945 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1946 /* no copy-on-read for mapped binaries */
1947 vmk_flags.vmkf_no_copy_on_read = 1;
1948 ret = vm_map_enter_mem_object_control(
1949 map,
1950 &cur_start,
1951 cur_end - cur_start,
1952 (mach_vm_offset_t)0,
1953 VM_FLAGS_FIXED,
1954 vmk_flags,
1955 VM_KERN_MEMORY_NONE,
1956 control,
1957 file_start + cur_offset,
1958 TRUE, /* copy */
1959 initprot, maxprot,
1960 VM_INHERIT_DEFAULT);
1961 } else {
1962 ret = vm_map_enter_mem_object(
1963 map,
1964 &cur_start,
1965 cur_end - cur_start,
1966 (mach_vm_offset_t)0,
1967 VM_FLAGS_FIXED,
1968 vmk_flags,
1969 VM_KERN_MEMORY_NONE,
1970 IPC_PORT_NULL,
1971 0, /* offset */
1972 TRUE, /* copy */
1973 initprot, maxprot,
1974 VM_INHERIT_DEFAULT);
1975 }
1976 if (ret != KERN_SUCCESS) {
1977 return LOAD_NOSPACE;
1978 }
1979 cur_offset += cur_end - cur_start;
1980 }
1981 #endif /* __arm64__ */
1982 done:
1983 assert(cur_end >= vm_start + (file_end - file_start));
1984 return LOAD_SUCCESS;
1985 }
1986
1987 static
1988 load_return_t
1989 load_segment(
1990 struct load_command *lcp,
1991 uint32_t filetype,
1992 void * control,
1993 off_t pager_offset,
1994 off_t macho_size,
1995 struct vnode *vp,
1996 vm_map_t map,
1997 int64_t slide,
1998 load_result_t *result,
1999 struct image_params *imgp)
2000 {
2001 struct segment_command_64 segment_command, *scp;
2002 kern_return_t ret;
2003 vm_map_size_t delta_size;
2004 vm_prot_t initprot;
2005 vm_prot_t maxprot;
2006 size_t segment_command_size, total_section_size,
2007 single_section_size;
2008 uint64_t file_offset, file_size;
2009 vm_map_offset_t vm_offset;
2010 size_t vm_size;
2011 vm_map_offset_t vm_start, vm_end, vm_end_aligned;
2012 vm_map_offset_t file_start, file_end;
2013 kern_return_t kr;
2014 boolean_t verbose;
2015 vm_map_size_t effective_page_size;
2016 vm_map_offset_t effective_page_mask;
2017 #if __arm64__
2018 vm_map_kernel_flags_t vmk_flags;
2019 boolean_t fourk_align;
2020 #endif /* __arm64__ */
2021
2022 (void)imgp;
2023
2024 effective_page_size = vm_map_page_size(map);
2025 effective_page_mask = vm_map_page_mask(map);
2026
2027 verbose = FALSE;
2028 if (LC_SEGMENT_64 == lcp->cmd) {
2029 segment_command_size = sizeof(struct segment_command_64);
2030 single_section_size = sizeof(struct section_64);
2031 #if __arm64__
2032 /* 64-bit binary: should already be 16K-aligned */
2033 fourk_align = FALSE;
2034
2035 if (vm_map_page_shift(map) == FOURK_PAGE_SHIFT &&
2036 PAGE_SHIFT != FOURK_PAGE_SHIFT) {
2037 fourk_align = TRUE;
2038 verbose = TRUE;
2039 }
2040 #endif /* __arm64__ */
2041 } else {
2042 segment_command_size = sizeof(struct segment_command);
2043 single_section_size = sizeof(struct section);
2044 #if __arm64__
2045 /* 32-bit binary: might need 4K-alignment */
2046 if (effective_page_size != FOURK_PAGE_SIZE) {
2047 /* not using 4K page size: need fourk_pager */
2048 fourk_align = TRUE;
2049 verbose = TRUE;
2050 } else {
2051 /* using 4K page size: no need for re-alignment */
2052 fourk_align = FALSE;
2053 }
2054 #endif /* __arm64__ */
2055 }
2056 if (lcp->cmdsize < segment_command_size) {
2057 DEBUG4K_ERROR("LOAD_BADMACHO cmdsize %d < %zu\n", lcp->cmdsize, segment_command_size);
2058 return LOAD_BADMACHO;
2059 }
2060 total_section_size = lcp->cmdsize - segment_command_size;
2061
2062 if (LC_SEGMENT_64 == lcp->cmd) {
2063 scp = (struct segment_command_64 *)lcp;
2064 } else {
2065 scp = &segment_command;
2066 widen_segment_command((struct segment_command *)lcp, scp);
2067 }
2068
2069 if (verbose) {
2070 MACHO_PRINTF(("+++ load_segment %s "
2071 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
2072 "prot %d/%d flags 0x%x\n",
2073 scp->segname,
2074 (uint64_t)(slide + scp->vmaddr),
2075 (uint64_t)(slide + scp->vmaddr + scp->vmsize),
2076 pager_offset + scp->fileoff,
2077 pager_offset + scp->fileoff + scp->filesize,
2078 scp->initprot,
2079 scp->maxprot,
2080 scp->flags));
2081 }
2082
2083 /*
2084 * Make sure what we get from the file is really ours (as specified
2085 * by macho_size).
2086 */
2087 if (scp->fileoff + scp->filesize < scp->fileoff ||
2088 scp->fileoff + scp->filesize > (uint64_t)macho_size) {
2089 DEBUG4K_ERROR("LOAD_BADMACHO fileoff 0x%llx filesize 0x%llx macho_size 0x%llx\n", scp->fileoff, scp->filesize, (uint64_t)macho_size);
2090 return LOAD_BADMACHO;
2091 }
2092 /*
2093 * Ensure that the number of sections specified would fit
2094 * within the load command size.
2095 */
2096 if (total_section_size / single_section_size < scp->nsects) {
2097 DEBUG4K_ERROR("LOAD_BADMACHO 0x%zx 0x%zx %d\n", total_section_size, single_section_size, scp->nsects);
2098 return LOAD_BADMACHO;
2099 }
2100 /*
2101 * Make sure the segment is page-aligned in the file.
2102 */
2103 if (os_add_overflow(pager_offset, scp->fileoff, &file_offset)) {
2104 DEBUG4K_ERROR("LOAD_BADMACHO file_offset: 0x%llx + 0x%llx\n", pager_offset, scp->fileoff);
2105 return LOAD_BADMACHO;
2106 }
2107 file_size = scp->filesize;
2108 #if __arm64__
2109 if (fourk_align) {
2110 if ((file_offset & FOURK_PAGE_MASK) != 0) {
2111 /*
2112 * we can't mmap() it if it's not at least 4KB-aligned
2113 * in the file
2114 */
2115 DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset);
2116 return LOAD_BADMACHO;
2117 }
2118 } else
2119 #endif /* __arm64__ */
2120 if ((file_offset & PAGE_MASK_64) != 0 ||
2121 /* we can't mmap() it if it's not page-aligned in the file */
2122 (file_offset & vm_map_page_mask(map)) != 0) {
2123 /*
2124 * The 1st test would have failed if the system's page size
2125 * was what this process believe is the page size, so let's
2126 * fail here too for the sake of consistency.
2127 */
2128 DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset);
2129 return LOAD_BADMACHO;
2130 }
2131
2132 /*
2133 * If we have a code signature attached for this slice
2134 * require that the segments are within the signed part
2135 * of the file.
2136 */
2137 if (result->cs_end_offset &&
2138 result->cs_end_offset < (off_t)scp->fileoff &&
2139 result->cs_end_offset - scp->fileoff < scp->filesize) {
2140 if (cs_debug) {
2141 printf("section outside code signature\n");
2142 }
2143 DEBUG4K_ERROR("LOAD_BADMACHO end_offset 0x%llx fileoff 0x%llx filesize 0x%llx\n", result->cs_end_offset, scp->fileoff, scp->filesize);
2144 return LOAD_BADMACHO;
2145 }
2146
2147 if (os_add_overflow(scp->vmaddr, slide, &vm_offset)) {
2148 if (cs_debug) {
2149 printf("vmaddr too large\n");
2150 }
2151 DEBUG4K_ERROR("LOAD_BADMACHO vmaddr 0x%llx slide 0x%llx vm_offset 0x%llx\n", scp->vmaddr, slide, (uint64_t)vm_offset);
2152 return LOAD_BADMACHO;
2153 }
2154
2155 if (scp->vmsize > SIZE_MAX) {
2156 DEBUG4K_ERROR("LOAD_BADMACHO vmsize 0x%llx\n", scp->vmsize);
2157 return LOAD_BADMACHO;
2158 }
2159
2160 vm_size = (size_t)scp->vmsize;
2161
2162 if (vm_size == 0) {
2163 return LOAD_SUCCESS;
2164 }
2165 if (scp->vmaddr == 0 &&
2166 file_size == 0 &&
2167 vm_size != 0 &&
2168 (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
2169 (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
2170 if (map == VM_MAP_NULL) {
2171 return LOAD_SUCCESS;
2172 }
2173
2174 /*
2175 * For PIE, extend page zero rather than moving it. Extending
2176 * page zero keeps early allocations from falling predictably
2177 * between the end of page zero and the beginning of the first
2178 * slid segment.
2179 */
2180 /*
2181 * This is a "page zero" segment: it starts at address 0,
2182 * is not mapped from the binary file and is not accessible.
2183 * User-space should never be able to access that memory, so
2184 * make it completely off limits by raising the VM map's
2185 * minimum offset.
2186 */
2187 vm_end = (vm_map_offset_t)(vm_offset + vm_size);
2188 if (vm_end < vm_offset) {
2189 DEBUG4K_ERROR("LOAD_BADMACHO vm_end 0x%llx vm_offset 0x%llx vm_size 0x%llx\n", (uint64_t)vm_end, (uint64_t)vm_offset, (uint64_t)vm_size);
2190 return LOAD_BADMACHO;
2191 }
2192
2193 if (verbose) {
2194 MACHO_PRINTF(("++++++ load_segment: "
2195 "page_zero up to 0x%llx\n",
2196 (uint64_t) vm_end));
2197 }
2198 #if __arm64__
2199 if (fourk_align) {
2200 /* raise min_offset as much as page-alignment allows */
2201 vm_end_aligned = vm_map_trunc_page(vm_end,
2202 effective_page_mask);
2203 } else
2204 #endif /* __arm64__ */
2205 {
2206 vm_end = vm_map_round_page(vm_end,
2207 PAGE_MASK_64);
2208 vm_end_aligned = vm_end;
2209 }
2210 ret = vm_map_raise_min_offset(map,
2211 vm_end_aligned);
2212 #if __arm64__
2213 if (ret == 0 &&
2214 vm_end > vm_end_aligned) {
2215 /* use fourk_pager to map the rest of pagezero */
2216 assert(fourk_align);
2217 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
2218 vmk_flags.vmkf_fourk = TRUE;
2219 ret = vm_map_enter_mem_object(
2220 map,
2221 &vm_end_aligned,
2222 vm_end - vm_end_aligned,
2223 (mach_vm_offset_t) 0, /* mask */
2224 VM_FLAGS_FIXED,
2225 vmk_flags,
2226 VM_KERN_MEMORY_NONE,
2227 IPC_PORT_NULL,
2228 0,
2229 FALSE, /* copy */
2230 (scp->initprot & VM_PROT_ALL),
2231 (scp->maxprot & VM_PROT_ALL),
2232 VM_INHERIT_DEFAULT);
2233 }
2234 #endif /* __arm64__ */
2235
2236 if (ret != KERN_SUCCESS) {
2237 DEBUG4K_ERROR("LOAD_FAILURE ret 0x%x\n", ret);
2238 return LOAD_FAILURE;
2239 }
2240 return LOAD_SUCCESS;
2241 } else {
2242 #if !defined(XNU_TARGET_OS_OSX)
2243 /* not PAGEZERO: should not be mapped at address 0 */
2244 if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
2245 DEBUG4K_ERROR("LOAD_BADMACHO filetype %d vmaddr 0x%llx\n", filetype, scp->vmaddr);
2246 return LOAD_BADMACHO;
2247 }
2248 #endif /* !defined(XNU_TARGET_OS_OSX) */
2249 }
2250
2251 #if __arm64__
2252 if (fourk_align) {
2253 /* 4K-align */
2254 file_start = vm_map_trunc_page(file_offset,
2255 FOURK_PAGE_MASK);
2256 file_end = vm_map_round_page(file_offset + file_size,
2257 FOURK_PAGE_MASK);
2258 vm_start = vm_map_trunc_page(vm_offset,
2259 FOURK_PAGE_MASK);
2260 vm_end = vm_map_round_page(vm_offset + vm_size,
2261 FOURK_PAGE_MASK);
2262
2263 if (file_offset - file_start > FOURK_PAGE_MASK ||
2264 file_end - file_offset - file_size > FOURK_PAGE_MASK) {
2265 DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap "
2266 "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n",
2267 file_offset,
2268 file_offset + file_size,
2269 (uint64_t) file_start,
2270 (uint64_t) file_end);
2271 return LOAD_BADMACHO;
2272 }
2273
2274 if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
2275 page_aligned(file_start) &&
2276 vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
2277 page_aligned(vm_start) &&
2278 vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
2279 /* XXX last segment: ignore mis-aligned tail */
2280 file_end = vm_map_round_page(file_end,
2281 effective_page_mask);
2282 vm_end = vm_map_round_page(vm_end,
2283 effective_page_mask);
2284 }
2285 } else
2286 #endif /* __arm64__ */
2287 {
2288 file_start = vm_map_trunc_page(file_offset,
2289 effective_page_mask);
2290 file_end = vm_map_round_page(file_offset + file_size,
2291 effective_page_mask);
2292 vm_start = vm_map_trunc_page(vm_offset,
2293 effective_page_mask);
2294 vm_end = vm_map_round_page(vm_offset + vm_size,
2295 effective_page_mask);
2296
2297 if (file_offset - file_start > effective_page_mask ||
2298 file_end - file_offset - file_size > effective_page_mask) {
2299 DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap "
2300 "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n",
2301 file_offset,
2302 file_offset + file_size,
2303 (uint64_t) file_start,
2304 (uint64_t) file_end);
2305 return LOAD_BADMACHO;
2306 }
2307 }
2308
2309 if (vm_start < result->min_vm_addr) {
2310 result->min_vm_addr = vm_start;
2311 }
2312 if (vm_end > result->max_vm_addr) {
2313 result->max_vm_addr = vm_end;
2314 }
2315
2316 if (map == VM_MAP_NULL) {
2317 return LOAD_SUCCESS;
2318 }
2319
2320 if (vm_size > 0) {
2321 initprot = (scp->initprot) & VM_PROT_ALL;
2322 maxprot = (scp->maxprot) & VM_PROT_ALL;
2323 /*
2324 * Map a copy of the file into the address space.
2325 */
2326 if (verbose) {
2327 MACHO_PRINTF(("++++++ load_segment: "
2328 "mapping at vm [0x%llx:0x%llx] of "
2329 "file [0x%llx:0x%llx]\n",
2330 (uint64_t) vm_start,
2331 (uint64_t) vm_end,
2332 (uint64_t) file_start,
2333 (uint64_t) file_end));
2334 }
2335 ret = map_segment(map,
2336 vm_start,
2337 vm_end,
2338 control,
2339 file_start,
2340 file_end,
2341 initprot,
2342 maxprot,
2343 result);
2344 if (ret) {
2345 DEBUG4K_ERROR("LOAD_NOSPACE start 0x%llx end 0x%llx ret 0x%x\n", (uint64_t)vm_start, (uint64_t)vm_end, ret);
2346 return LOAD_NOSPACE;
2347 }
2348
2349 #if FIXME
2350 /*
2351 * If the file didn't end on a page boundary,
2352 * we need to zero the leftover.
2353 */
2354 delta_size = map_size - scp->filesize;
2355 if (delta_size > 0) {
2356 mach_vm_offset_t tmp;
2357
2358 ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD);
2359 if (ret != KERN_SUCCESS) {
2360 DEBUG4K_ERROR("LOAD_RESOURCE delta_size 0x%llx ret 0x%x\n", delta_size, ret);
2361 return LOAD_RESOURCE;
2362 }
2363
2364 if (copyout(tmp, map_addr + scp->filesize,
2365 delta_size)) {
2366 (void) mach_vm_deallocate(
2367 kernel_map, tmp, delta_size);
2368 DEBUG4K_ERROR("LOAD_FAILURE copyout 0x%llx 0x%llx\n", map_addr + scp->filesize, delta_size);
2369 return LOAD_FAILURE;
2370 }
2371
2372 (void) mach_vm_deallocate(kernel_map, tmp, delta_size);
2373 }
2374 #endif /* FIXME */
2375 }
2376
2377 /*
2378 * If the virtual size of the segment is greater
2379 * than the size from the file, we need to allocate
2380 * zero fill memory for the rest.
2381 */
2382 if ((vm_end - vm_start) > (file_end - file_start)) {
2383 delta_size = (vm_end - vm_start) - (file_end - file_start);
2384 } else {
2385 delta_size = 0;
2386 }
2387 if (delta_size > 0) {
2388 vm_map_offset_t tmp_start;
2389 vm_map_offset_t tmp_end;
2390
2391 if (os_add_overflow(vm_start, file_end - file_start, &tmp_start)) {
2392 DEBUG4K_ERROR("LOAD_NOSPACE tmp_start: 0x%llx + 0x%llx\n", (uint64_t)vm_start, (uint64_t)(file_end - file_start));
2393 return LOAD_NOSPACE;
2394 }
2395
2396 if (os_add_overflow(tmp_start, delta_size, &tmp_end)) {
2397 DEBUG4K_ERROR("LOAD_NOSPACE tmp_end: 0x%llx + 0x%llx\n", (uint64_t)tmp_start, (uint64_t)delta_size);
2398 return LOAD_NOSPACE;
2399 }
2400
2401 if (verbose) {
2402 MACHO_PRINTF(("++++++ load_segment: "
2403 "delta mapping vm [0x%llx:0x%llx]\n",
2404 (uint64_t) tmp_start,
2405 (uint64_t) tmp_end));
2406 }
2407 kr = map_segment(map,
2408 tmp_start,
2409 tmp_end,
2410 MEMORY_OBJECT_CONTROL_NULL,
2411 0,
2412 delta_size,
2413 scp->initprot,
2414 scp->maxprot,
2415 result);
2416 if (kr != KERN_SUCCESS) {
2417 DEBUG4K_ERROR("LOAD_NOSPACE 0x%llx 0x%llx kr 0x%x\n", (unsigned long long)tmp_start, (uint64_t)delta_size, kr);
2418 return LOAD_NOSPACE;
2419 }
2420 }
2421
2422 if ((scp->fileoff == 0) && (scp->filesize != 0)) {
2423 result->mach_header = vm_offset;
2424 }
2425
2426 if (scp->flags & SG_PROTECTED_VERSION_1) {
2427 ret = unprotect_dsmos_segment(file_start,
2428 file_end - file_start,
2429 vp,
2430 pager_offset,
2431 map,
2432 vm_start,
2433 vm_end - vm_start);
2434 if (ret != LOAD_SUCCESS) {
2435 DEBUG4K_ERROR("unprotect 0x%llx 0x%llx ret %d \n", (uint64_t)vm_start, (uint64_t)vm_end, ret);
2436 return ret;
2437 }
2438 } else {
2439 ret = LOAD_SUCCESS;
2440 }
2441
2442 if (LOAD_SUCCESS == ret &&
2443 filetype == MH_DYLINKER &&
2444 result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
2445 note_all_image_info_section(scp,
2446 LC_SEGMENT_64 == lcp->cmd,
2447 single_section_size,
2448 ((const char *)lcp +
2449 segment_command_size),
2450 slide,
2451 result);
2452 }
2453
2454 if (result->entry_point != MACH_VM_MIN_ADDRESS) {
2455 if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
2456 if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) {
2457 result->validentry = 1;
2458 } else {
2459 /* right range but wrong protections, unset if previously validated */
2460 result->validentry = 0;
2461 }
2462 }
2463 }
2464
2465 if (ret != LOAD_SUCCESS && verbose) {
2466 DEBUG4K_ERROR("ret %d\n", ret);
2467 }
2468 return ret;
2469 }
2470
2471 static
2472 load_return_t
2473 load_uuid(
2474 struct uuid_command *uulp,
2475 char *command_end,
2476 load_result_t *result
2477 )
2478 {
2479 /*
2480 * We need to check the following for this command:
2481 * - The command size should be atleast the size of struct uuid_command
2482 * - The UUID part of the command should be completely within the mach-o header
2483 */
2484
2485 if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
2486 (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
2487 return LOAD_BADMACHO;
2488 }
2489
2490 memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
2491 return LOAD_SUCCESS;
2492 }
2493
2494 static
2495 load_return_t
2496 load_version(
2497 struct version_min_command *vmc,
2498 boolean_t *found_version_cmd,
2499 int ip_flags __unused,
2500 load_result_t *result
2501 )
2502 {
2503 uint32_t platform = 0;
2504 uint32_t sdk;
2505
2506 if (vmc->cmdsize < sizeof(*vmc)) {
2507 return LOAD_BADMACHO;
2508 }
2509 if (*found_version_cmd == TRUE) {
2510 return LOAD_BADMACHO;
2511 }
2512 *found_version_cmd = TRUE;
2513 sdk = vmc->sdk;
2514 switch (vmc->cmd) {
2515 case LC_VERSION_MIN_MACOSX:
2516 platform = PLATFORM_MACOS;
2517 break;
2518 #if __x86_64__ /* __x86_64__ */
2519 case LC_VERSION_MIN_IPHONEOS:
2520 platform = PLATFORM_IOSSIMULATOR;
2521 break;
2522 case LC_VERSION_MIN_WATCHOS:
2523 platform = PLATFORM_WATCHOSSIMULATOR;
2524 break;
2525 case LC_VERSION_MIN_TVOS:
2526 platform = PLATFORM_TVOSSIMULATOR;
2527 break;
2528 #else
2529 case LC_VERSION_MIN_IPHONEOS: {
2530 #if __arm64__
2531 extern int legacy_footprint_entitlement_mode;
2532 if (vmc->sdk < (12 << 16)) {
2533 /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
2534 result->legacy_footprint = TRUE;
2535 }
2536 #endif /* __arm64__ */
2537 platform = PLATFORM_IOS;
2538 break;
2539 }
2540 case LC_VERSION_MIN_WATCHOS:
2541 platform = PLATFORM_WATCHOS;
2542 break;
2543 case LC_VERSION_MIN_TVOS:
2544 platform = PLATFORM_TVOS;
2545 break;
2546 #endif /* __x86_64__ */
2547 /* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */
2548 default:
2549 sdk = (uint32_t)-1;
2550 __builtin_unreachable();
2551 }
2552 result->ip_platform = platform;
2553 result->lr_min_sdk = sdk;
2554 return LOAD_SUCCESS;
2555 }
2556
2557 static
2558 load_return_t
2559 load_main(
2560 struct entry_point_command *epc,
2561 thread_t thread,
2562 int64_t slide,
2563 load_result_t *result
2564 )
2565 {
2566 mach_vm_offset_t addr;
2567 kern_return_t ret;
2568
2569 if (epc->cmdsize < sizeof(*epc)) {
2570 return LOAD_BADMACHO;
2571 }
2572 if (result->thread_count != 0) {
2573 return LOAD_FAILURE;
2574 }
2575
2576 if (thread == THREAD_NULL) {
2577 return LOAD_SUCCESS;
2578 }
2579
2580 /*
2581 * LC_MAIN specifies stack size but not location.
2582 * Add guard page to allocation size (MAXSSIZ includes guard page).
2583 */
2584 if (epc->stacksize) {
2585 if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) {
2586 /*
2587 * We are going to immediately throw away this result, but we want
2588 * to make sure we aren't loading a dangerously close to
2589 * overflowing value, since this will have a guard page added to it
2590 * and be rounded to page boundaries
2591 */
2592 return LOAD_BADMACHO;
2593 }
2594 result->user_stack_size = epc->stacksize;
2595 if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
2596 return LOAD_BADMACHO;
2597 }
2598 result->custom_stack = TRUE;
2599 } else {
2600 result->user_stack_alloc_size = MAXSSIZ;
2601 }
2602
2603 /* use default location for stack */
2604 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2605 if (ret != KERN_SUCCESS) {
2606 return LOAD_FAILURE;
2607 }
2608
2609 /* The stack slides down from the default location */
2610 result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide);
2611
2612 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2613 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2614 return LOAD_FAILURE;
2615 }
2616
2617 /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
2618 result->needs_dynlinker = TRUE;
2619 result->using_lcmain = TRUE;
2620
2621 ret = thread_state_initialize( thread );
2622 if (ret != KERN_SUCCESS) {
2623 return LOAD_FAILURE;
2624 }
2625
2626 result->unixproc = TRUE;
2627 result->thread_count++;
2628
2629 return LOAD_SUCCESS;
2630 }
2631
2632 static
2633 load_return_t
2634 setup_driver_main(
2635 thread_t thread,
2636 int64_t slide,
2637 load_result_t *result
2638 )
2639 {
2640 mach_vm_offset_t addr;
2641 kern_return_t ret;
2642
2643 /* Driver binaries have no LC_MAIN, use defaults */
2644
2645 if (thread == THREAD_NULL) {
2646 return LOAD_SUCCESS;
2647 }
2648
2649 result->user_stack_alloc_size = MAXSSIZ;
2650
2651 /* use default location for stack */
2652 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2653 if (ret != KERN_SUCCESS) {
2654 return LOAD_FAILURE;
2655 }
2656
2657 /* The stack slides down from the default location */
2658 result->user_stack = (user_addr_t)addr;
2659 result->user_stack -= slide;
2660
2661 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2662 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2663 return LOAD_FAILURE;
2664 }
2665
2666 result->needs_dynlinker = TRUE;
2667
2668 ret = thread_state_initialize( thread );
2669 if (ret != KERN_SUCCESS) {
2670 return LOAD_FAILURE;
2671 }
2672
2673 result->unixproc = TRUE;
2674 result->thread_count++;
2675
2676 return LOAD_SUCCESS;
2677 }
2678
2679 static
2680 load_return_t
2681 load_unixthread(
2682 struct thread_command *tcp,
2683 thread_t thread,
2684 int64_t slide,
2685 boolean_t is_x86_64_compat_binary,
2686 load_result_t *result
2687 )
2688 {
2689 load_return_t ret;
2690 int customstack = 0;
2691 mach_vm_offset_t addr;
2692 if (tcp->cmdsize < sizeof(*tcp)) {
2693 return LOAD_BADMACHO;
2694 }
2695 if (result->thread_count != 0) {
2696 return LOAD_FAILURE;
2697 }
2698
2699 if (thread == THREAD_NULL) {
2700 return LOAD_SUCCESS;
2701 }
2702
2703 ret = load_threadstack(thread,
2704 (uint32_t *)(((vm_offset_t)tcp) +
2705 sizeof(struct thread_command)),
2706 tcp->cmdsize - sizeof(struct thread_command),
2707 &addr, &customstack, is_x86_64_compat_binary, result);
2708 if (ret != LOAD_SUCCESS) {
2709 return ret;
2710 }
2711
2712 /* LC_UNIXTHREAD optionally specifies stack size and location */
2713
2714 if (customstack) {
2715 result->custom_stack = TRUE;
2716 } else {
2717 result->user_stack_alloc_size = MAXSSIZ;
2718 }
2719
2720 /* The stack slides down from the default location */
2721 result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide);
2722
2723 {
2724 ret = load_threadentry(thread,
2725 (uint32_t *)(((vm_offset_t)tcp) +
2726 sizeof(struct thread_command)),
2727 tcp->cmdsize - sizeof(struct thread_command),
2728 &addr);
2729 if (ret != LOAD_SUCCESS) {
2730 return ret;
2731 }
2732
2733 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2734 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2735 return LOAD_FAILURE;
2736 }
2737
2738 result->entry_point = (user_addr_t)addr;
2739 result->entry_point += slide;
2740
2741 ret = load_threadstate(thread,
2742 (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
2743 tcp->cmdsize - sizeof(struct thread_command),
2744 result);
2745 if (ret != LOAD_SUCCESS) {
2746 return ret;
2747 }
2748 }
2749
2750 result->unixproc = TRUE;
2751 result->thread_count++;
2752
2753 return LOAD_SUCCESS;
2754 }
2755
2756 static
2757 load_return_t
2758 load_threadstate(
2759 thread_t thread,
2760 uint32_t *ts,
2761 uint32_t total_size,
2762 load_result_t *result
2763 )
2764 {
2765 uint32_t size;
2766 int flavor;
2767 uint32_t thread_size;
2768 uint32_t *local_ts = NULL;
2769 uint32_t local_ts_size = 0;
2770 int ret;
2771
2772 (void)thread;
2773
2774 if (total_size > 0) {
2775 local_ts_size = total_size;
2776 local_ts = kalloc(local_ts_size);
2777 if (local_ts == NULL) {
2778 return LOAD_FAILURE;
2779 }
2780 memcpy(local_ts, ts, local_ts_size);
2781 ts = local_ts;
2782 }
2783
2784 /*
2785 * Validate the new thread state; iterate through the state flavors in
2786 * the Mach-O file.
2787 * XXX: we should validate the machine state here, to avoid failing at
2788 * activation time where we can't bail out cleanly.
2789 */
2790 while (total_size > 0) {
2791 if (total_size < 2 * sizeof(uint32_t)) {
2792 return LOAD_BADMACHO;
2793 }
2794
2795 flavor = *ts++;
2796 size = *ts++;
2797
2798 if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
2799 os_sub_overflow(total_size, thread_size, &total_size)) {
2800 ret = LOAD_BADMACHO;
2801 goto bad;
2802 }
2803
2804 ts += size; /* ts is a (uint32_t *) */
2805 }
2806
2807 result->threadstate = local_ts;
2808 result->threadstate_sz = local_ts_size;
2809 return LOAD_SUCCESS;
2810
2811 bad:
2812 if (local_ts) {
2813 kfree(local_ts, local_ts_size);
2814 }
2815 return ret;
2816 }
2817
2818
2819 static
2820 load_return_t
2821 load_threadstack(
2822 thread_t thread,
2823 uint32_t *ts,
2824 uint32_t total_size,
2825 mach_vm_offset_t *user_stack,
2826 int *customstack,
2827 __unused boolean_t is_x86_64_compat_binary,
2828 load_result_t *result
2829 )
2830 {
2831 kern_return_t ret;
2832 uint32_t size;
2833 int flavor;
2834 uint32_t stack_size;
2835
2836 if (total_size == 0) {
2837 return LOAD_BADMACHO;
2838 }
2839
2840 while (total_size > 0) {
2841 if (total_size < 2 * sizeof(uint32_t)) {
2842 return LOAD_BADMACHO;
2843 }
2844
2845 flavor = *ts++;
2846 size = *ts++;
2847 if (UINT32_MAX - 2 < size ||
2848 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2849 return LOAD_BADMACHO;
2850 }
2851 stack_size = (size + 2) * sizeof(uint32_t);
2852 if (stack_size > total_size) {
2853 return LOAD_BADMACHO;
2854 }
2855 total_size -= stack_size;
2856
2857 /*
2858 * Third argument is a kernel space pointer; it gets cast
2859 * to the appropriate type in thread_userstack() based on
2860 * the value of flavor.
2861 */
2862 {
2863 ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
2864 if (ret != KERN_SUCCESS) {
2865 return LOAD_FAILURE;
2866 }
2867 }
2868
2869 ts += size; /* ts is a (uint32_t *) */
2870 }
2871 return LOAD_SUCCESS;
2872 }
2873
2874 static
2875 load_return_t
2876 load_threadentry(
2877 thread_t thread,
2878 uint32_t *ts,
2879 uint32_t total_size,
2880 mach_vm_offset_t *entry_point
2881 )
2882 {
2883 kern_return_t ret;
2884 uint32_t size;
2885 int flavor;
2886 uint32_t entry_size;
2887
2888 /*
2889 * Set the thread state.
2890 */
2891 *entry_point = MACH_VM_MIN_ADDRESS;
2892 while (total_size > 0) {
2893 if (total_size < 2 * sizeof(uint32_t)) {
2894 return LOAD_BADMACHO;
2895 }
2896
2897 flavor = *ts++;
2898 size = *ts++;
2899 if (UINT32_MAX - 2 < size ||
2900 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2901 return LOAD_BADMACHO;
2902 }
2903 entry_size = (size + 2) * sizeof(uint32_t);
2904 if (entry_size > total_size) {
2905 return LOAD_BADMACHO;
2906 }
2907 total_size -= entry_size;
2908 /*
2909 * Third argument is a kernel space pointer; it gets cast
2910 * to the appropriate type in thread_entrypoint() based on
2911 * the value of flavor.
2912 */
2913 ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
2914 if (ret != KERN_SUCCESS) {
2915 return LOAD_FAILURE;
2916 }
2917 ts += size; /* ts is a (uint32_t *) */
2918 }
2919 return LOAD_SUCCESS;
2920 }
2921
2922 struct macho_data {
2923 struct nameidata __nid;
2924 union macho_vnode_header {
2925 struct mach_header mach_header;
2926 struct fat_header fat_header;
2927 char __pad[512];
2928 } __header;
2929 };
2930
2931 #define DEFAULT_DYLD_PATH "/usr/lib/dyld"
2932
2933 #if (DEVELOPMENT || DEBUG)
2934 extern char dyld_alt_path[];
2935 extern int use_alt_dyld;
2936 #endif
2937
2938 static load_return_t
2939 load_dylinker(
2940 struct dylinker_command *lcp,
2941 cpu_type_t cputype,
2942 vm_map_t map,
2943 thread_t thread,
2944 int depth,
2945 int64_t slide,
2946 load_result_t *result,
2947 struct image_params *imgp
2948 )
2949 {
2950 const char *name;
2951 struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
2952 struct mach_header *header;
2953 off_t file_offset = 0; /* set by get_macho_vnode() */
2954 off_t macho_size = 0; /* set by get_macho_vnode() */
2955 load_result_t *myresult;
2956 kern_return_t ret;
2957 struct macho_data *macho_data;
2958 struct {
2959 struct mach_header __header;
2960 load_result_t __myresult;
2961 struct macho_data __macho_data;
2962 } *dyld_data;
2963
2964 if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) {
2965 return LOAD_BADMACHO;
2966 }
2967
2968 name = (const char *)lcp + lcp->name.offset;
2969
2970 /* Check for a proper null terminated string. */
2971 size_t maxsz = lcp->cmdsize - lcp->name.offset;
2972 size_t namelen = strnlen(name, maxsz);
2973 if (namelen >= maxsz) {
2974 return LOAD_BADMACHO;
2975 }
2976
2977 #if (DEVELOPMENT || DEBUG)
2978
2979 /*
2980 * rdar://23680808
2981 * If an alternate dyld has been specified via boot args, check
2982 * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
2983 * executable and redirect the kernel to load that linker.
2984 */
2985
2986 if (use_alt_dyld) {
2987 int policy_error;
2988 uint32_t policy_flags = 0;
2989 int32_t policy_gencount = 0;
2990
2991 policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
2992 if (policy_error == 0) {
2993 if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
2994 name = dyld_alt_path;
2995 }
2996 }
2997 }
2998 #endif
2999
3000 #if !(DEVELOPMENT || DEBUG)
3001 if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
3002 return LOAD_BADMACHO;
3003 }
3004 #endif
3005
3006 /* Allocate wad-of-data from heap to reduce excessively deep stacks */
3007
3008 MALLOC(dyld_data, void *, sizeof(*dyld_data), M_TEMP, M_WAITOK);
3009 header = &dyld_data->__header;
3010 myresult = &dyld_data->__myresult;
3011 macho_data = &dyld_data->__macho_data;
3012
3013 {
3014 cputype = (cputype & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK);
3015 }
3016
3017 ret = get_macho_vnode(name, cputype, header,
3018 &file_offset, &macho_size, macho_data, &vp, imgp);
3019 if (ret) {
3020 goto novp_out;
3021 }
3022
3023 *myresult = load_result_null;
3024 myresult->is_64bit_addr = result->is_64bit_addr;
3025 myresult->is_64bit_data = result->is_64bit_data;
3026
3027 ret = parse_machfile(vp, map, thread, header, file_offset,
3028 macho_size, depth, slide, 0, myresult, result, imgp);
3029
3030 if (ret == LOAD_SUCCESS) {
3031 if (result->threadstate) {
3032 /* don't use the app's threadstate if we have a dyld */
3033 kfree(result->threadstate, result->threadstate_sz);
3034 }
3035 result->threadstate = myresult->threadstate;
3036 result->threadstate_sz = myresult->threadstate_sz;
3037
3038 result->dynlinker = TRUE;
3039 result->entry_point = myresult->entry_point;
3040 result->validentry = myresult->validentry;
3041 result->all_image_info_addr = myresult->all_image_info_addr;
3042 result->all_image_info_size = myresult->all_image_info_size;
3043 if (myresult->platform_binary) {
3044 result->csflags |= CS_DYLD_PLATFORM;
3045 }
3046
3047 }
3048
3049 struct vnode_attr *va;
3050 va = kheap_alloc(KHEAP_TEMP, sizeof(*va), Z_WAITOK | Z_ZERO);
3051 VATTR_INIT(va);
3052 VATTR_WANTED(va, va_fsid64);
3053 VATTR_WANTED(va, va_fsid);
3054 VATTR_WANTED(va, va_fileid);
3055 int error = vnode_getattr(vp, va, imgp->ip_vfs_context);
3056 if (error == 0) {
3057 imgp->ip_dyld_fsid = vnode_get_va_fsid(va);
3058 imgp->ip_dyld_fsobjid = va->va_fileid;
3059 }
3060
3061 vnode_put(vp);
3062 kheap_free(KHEAP_TEMP, va, sizeof(*va));
3063 novp_out:
3064 FREE(dyld_data, M_TEMP);
3065 return ret;
3066 }
3067
3068
3069 static load_return_t
3070 load_code_signature(
3071 struct linkedit_data_command *lcp,
3072 struct vnode *vp,
3073 off_t macho_offset,
3074 off_t macho_size,
3075 cpu_type_t cputype,
3076 cpu_subtype_t cpusubtype,
3077 load_result_t *result,
3078 struct image_params *imgp)
3079 {
3080 int ret;
3081 kern_return_t kr;
3082 vm_offset_t addr;
3083 int resid;
3084 struct cs_blob *blob;
3085 int error;
3086 vm_size_t blob_size;
3087 uint32_t sum;
3088 boolean_t anyCPU;
3089
3090 addr = 0;
3091 blob = NULL;
3092
3093 cpusubtype &= ~CPU_SUBTYPE_MASK;
3094
3095 if (lcp->cmdsize != sizeof(struct linkedit_data_command)) {
3096 ret = LOAD_BADMACHO;
3097 goto out;
3098 }
3099
3100 sum = 0;
3101 if (os_add_overflow(lcp->dataoff, lcp->datasize, &sum) || sum > macho_size) {
3102 ret = LOAD_BADMACHO;
3103 goto out;
3104 }
3105
3106 blob = ubc_cs_blob_get(vp, cputype, cpusubtype, macho_offset);
3107
3108 if (blob != NULL) {
3109 /* we already have a blob for this vnode and cpu(sub)type */
3110 anyCPU = blob->csb_cpu_type == -1;
3111 if ((blob->csb_cpu_type != cputype &&
3112 blob->csb_cpu_subtype != cpusubtype && !anyCPU) ||
3113 blob->csb_base_offset != macho_offset) {
3114 /* the blob has changed for this vnode: fail ! */
3115 ret = LOAD_BADMACHO;
3116 goto out;
3117 }
3118
3119 /* It matches the blob we want here, let's verify the version */
3120 if (!anyCPU && ubc_cs_generation_check(vp) == 0) {
3121 /* No need to revalidate, we're good! */
3122 ret = LOAD_SUCCESS;
3123 goto out;
3124 }
3125
3126 /* That blob may be stale, let's revalidate. */
3127 error = ubc_cs_blob_revalidate(vp, blob, imgp, 0, result->ip_platform);
3128 if (error == 0) {
3129 /* Revalidation succeeded, we're good! */
3130 /* If we were revaliding a CS blob with any CPU arch we adjust it */
3131 if (anyCPU) {
3132 vnode_lock_spin(vp);
3133 blob->csb_cpu_type = cputype;
3134 blob->csb_cpu_subtype = cpusubtype;
3135 vnode_unlock(vp);
3136 }
3137 ret = LOAD_SUCCESS;
3138 goto out;
3139 }
3140
3141 if (error != EAGAIN) {
3142 printf("load_code_signature: revalidation failed: %d\n", error);
3143 ret = LOAD_FAILURE;
3144 goto out;
3145 }
3146
3147 assert(error == EAGAIN);
3148
3149 /*
3150 * Revalidation was not possible for this blob. We just continue as if there was no blob,
3151 * rereading the signature, and ubc_cs_blob_add will do the right thing.
3152 */
3153 blob = NULL;
3154 }
3155
3156 blob_size = lcp->datasize;
3157 kr = ubc_cs_blob_allocate(&addr, &blob_size);
3158 if (kr != KERN_SUCCESS) {
3159 ret = LOAD_NOSPACE;
3160 goto out;
3161 }
3162
3163 resid = 0;
3164 error = vn_rdwr(UIO_READ,
3165 vp,
3166 (caddr_t) addr,
3167 lcp->datasize,
3168 macho_offset + lcp->dataoff,
3169 UIO_SYSSPACE,
3170 0,
3171 kauth_cred_get(),
3172 &resid,
3173 current_proc());
3174 if (error || resid != 0) {
3175 ret = LOAD_IOERROR;
3176 goto out;
3177 }
3178
3179 if (ubc_cs_blob_add(vp,
3180 result->ip_platform,
3181 cputype,
3182 cpusubtype,
3183 macho_offset,
3184 &addr,
3185 lcp->datasize,
3186 imgp,
3187 0,
3188 &blob)) {
3189 if (addr) {
3190 ubc_cs_blob_deallocate(addr, blob_size);
3191 addr = 0;
3192 }
3193 ret = LOAD_FAILURE;
3194 goto out;
3195 } else {
3196 /* ubc_cs_blob_add() has consumed "addr" */
3197 addr = 0;
3198 }
3199
3200 #if CHECK_CS_VALIDATION_BITMAP
3201 ubc_cs_validation_bitmap_allocate( vp );
3202 #endif
3203
3204 ret = LOAD_SUCCESS;
3205 out:
3206 if (ret == LOAD_SUCCESS) {
3207 if (blob == NULL) {
3208 panic("success, but no blob!");
3209 }
3210
3211 result->csflags |= blob->csb_flags;
3212 result->platform_binary = blob->csb_platform_binary;
3213 result->cs_end_offset = blob->csb_end_offset;
3214 }
3215 if (addr != 0) {
3216 ubc_cs_blob_deallocate(addr, blob_size);
3217 addr = 0;
3218 }
3219
3220 return ret;
3221 }
3222
3223
3224 #if CONFIG_CODE_DECRYPTION
3225
3226 static load_return_t
3227 set_code_unprotect(
3228 struct encryption_info_command *eip,
3229 caddr_t addr,
3230 vm_map_t map,
3231 int64_t slide,
3232 struct vnode *vp,
3233 off_t macho_offset,
3234 cpu_type_t cputype,
3235 cpu_subtype_t cpusubtype)
3236 {
3237 int error, len;
3238 pager_crypt_info_t crypt_info;
3239 const char * cryptname = 0;
3240 char *vpath;
3241
3242 size_t offset;
3243 struct segment_command_64 *seg64;
3244 struct segment_command *seg32;
3245 vm_map_offset_t map_offset, map_size;
3246 vm_object_offset_t crypto_backing_offset;
3247 kern_return_t kr;
3248
3249 if (eip->cmdsize < sizeof(*eip)) {
3250 return LOAD_BADMACHO;
3251 }
3252
3253 switch (eip->cryptid) {
3254 case 0:
3255 /* not encrypted, just an empty load command */
3256 return LOAD_SUCCESS;
3257 case 1:
3258 cryptname = "com.apple.unfree";
3259 break;
3260 case 0x10:
3261 /* some random cryptid that you could manually put into
3262 * your binary if you want NULL */
3263 cryptname = "com.apple.null";
3264 break;
3265 default:
3266 return LOAD_BADMACHO;
3267 }
3268
3269 if (map == VM_MAP_NULL) {
3270 return LOAD_SUCCESS;
3271 }
3272 if (NULL == text_crypter_create) {
3273 return LOAD_FAILURE;
3274 }
3275
3276 vpath = zalloc(ZV_NAMEI);
3277
3278 len = MAXPATHLEN;
3279 error = vn_getpath(vp, vpath, &len);
3280 if (error) {
3281 zfree(ZV_NAMEI, vpath);
3282 return LOAD_FAILURE;
3283 }
3284
3285 /* set up decrypter first */
3286 crypt_file_data_t crypt_data = {
3287 .filename = vpath,
3288 .cputype = cputype,
3289 .cpusubtype = cpusubtype
3290 };
3291 kr = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
3292 #if VM_MAP_DEBUG_APPLE_PROTECT
3293 if (vm_map_debug_apple_protect) {
3294 struct proc *p;
3295 p = current_proc();
3296 printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
3297 p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr);
3298 }
3299 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
3300 zfree(ZV_NAMEI, vpath);
3301
3302 if (kr) {
3303 printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
3304 cryptname, kr);
3305 if (kr == kIOReturnNotPrivileged) {
3306 /* text encryption returned decryption failure */
3307 return LOAD_DECRYPTFAIL;
3308 } else {
3309 return LOAD_RESOURCE;
3310 }
3311 }
3312
3313 /* this is terrible, but we have to rescan the load commands to find the
3314 * virtual address of this encrypted stuff. This code is gonna look like
3315 * the dyld source one day... */
3316 struct mach_header *header = (struct mach_header *)addr;
3317 size_t mach_header_sz = sizeof(struct mach_header);
3318 if (header->magic == MH_MAGIC_64 ||
3319 header->magic == MH_CIGAM_64) {
3320 mach_header_sz = sizeof(struct mach_header_64);
3321 }
3322 offset = mach_header_sz;
3323 uint32_t ncmds = header->ncmds;
3324 while (ncmds--) {
3325 /*
3326 * Get a pointer to the command.
3327 */
3328 struct load_command *lcp = (struct load_command *)(addr + offset);
3329 offset += lcp->cmdsize;
3330
3331 switch (lcp->cmd) {
3332 case LC_SEGMENT_64:
3333 seg64 = (struct segment_command_64 *)lcp;
3334 if ((seg64->fileoff <= eip->cryptoff) &&
3335 (seg64->fileoff + seg64->filesize >=
3336 eip->cryptoff + eip->cryptsize)) {
3337 map_offset = (vm_map_offset_t)(seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide);
3338 map_size = eip->cryptsize;
3339 crypto_backing_offset = macho_offset + eip->cryptoff;
3340 goto remap_now;
3341 }
3342 break;
3343 case LC_SEGMENT:
3344 seg32 = (struct segment_command *)lcp;
3345 if ((seg32->fileoff <= eip->cryptoff) &&
3346 (seg32->fileoff + seg32->filesize >=
3347 eip->cryptoff + eip->cryptsize)) {
3348 map_offset = (vm_map_offset_t)(seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide);
3349 map_size = eip->cryptsize;
3350 crypto_backing_offset = macho_offset + eip->cryptoff;
3351 goto remap_now;
3352 }
3353 break;
3354 }
3355 }
3356
3357 /* if we get here, did not find anything */
3358 return LOAD_BADMACHO;
3359
3360 remap_now:
3361 /* now remap using the decrypter */
3362 MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
3363 (uint64_t) map_offset,
3364 (uint64_t) (map_offset + map_size)));
3365 kr = vm_map_apple_protected(map,
3366 map_offset,
3367 map_offset + map_size,
3368 crypto_backing_offset,
3369 &crypt_info,
3370 CRYPTID_APP_ENCRYPTION);
3371 if (kr) {
3372 printf("set_code_unprotect(): mapping failed with %x\n", kr);
3373 return LOAD_PROTECT;
3374 }
3375
3376 return LOAD_SUCCESS;
3377 }
3378
3379 #endif
3380
3381 /*
3382 * This routine exists to support the load_dylinker().
3383 *
3384 * This routine has its own, separate, understanding of the FAT file format,
3385 * which is terrifically unfortunate.
3386 */
3387 static
3388 load_return_t
3389 get_macho_vnode(
3390 const char *path,
3391 cpu_type_t cputype,
3392 struct mach_header *mach_header,
3393 off_t *file_offset,
3394 off_t *macho_size,
3395 struct macho_data *data,
3396 struct vnode **vpp,
3397 struct image_params *imgp
3398 )
3399 {
3400 struct vnode *vp;
3401 vfs_context_t ctx = vfs_context_current();
3402 proc_t p = vfs_context_proc(ctx);
3403 kauth_cred_t kerncred;
3404 struct nameidata *ndp = &data->__nid;
3405 boolean_t is_fat;
3406 struct fat_arch fat_arch;
3407 int error;
3408 int resid;
3409 union macho_vnode_header *header = &data->__header;
3410 off_t fsize = (off_t)0;
3411
3412 /*
3413 * Capture the kernel credential for use in the actual read of the
3414 * file, since the user doing the execution may have execute rights
3415 * but not read rights, but to exec something, we have to either map
3416 * or read it into the new process address space, which requires
3417 * read rights. This is to deal with lack of common credential
3418 * serialization code which would treat NOCRED as "serialize 'root'".
3419 */
3420 kerncred = vfs_context_ucred(vfs_context_kernel());
3421
3422 /* init the namei data to point the file user's program name */
3423 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
3424
3425 if ((error = namei(ndp)) != 0) {
3426 if (error == ENOENT) {
3427 error = LOAD_ENOENT;
3428 } else {
3429 error = LOAD_FAILURE;
3430 }
3431 return error;
3432 }
3433 nameidone(ndp);
3434 vp = ndp->ni_vp;
3435
3436 /* check for regular file */
3437 if (vp->v_type != VREG) {
3438 error = LOAD_PROTECT;
3439 goto bad1;
3440 }
3441
3442 /* get size */
3443 if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
3444 error = LOAD_FAILURE;
3445 goto bad1;
3446 }
3447
3448 /* Check mount point */
3449 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
3450 error = LOAD_PROTECT;
3451 goto bad1;
3452 }
3453
3454 /* check access */
3455 if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
3456 error = LOAD_PROTECT;
3457 goto bad1;
3458 }
3459
3460 /* try to open it */
3461 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
3462 error = LOAD_PROTECT;
3463 goto bad1;
3464 }
3465
3466 if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0,
3467 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
3468 error = LOAD_IOERROR;
3469 goto bad2;
3470 }
3471
3472 if (resid) {
3473 error = LOAD_BADMACHO;
3474 goto bad2;
3475 }
3476
3477 if (header->mach_header.magic == MH_MAGIC ||
3478 header->mach_header.magic == MH_MAGIC_64) {
3479 is_fat = FALSE;
3480 } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
3481 is_fat = TRUE;
3482 } else {
3483 error = LOAD_BADMACHO;
3484 goto bad2;
3485 }
3486
3487 if (is_fat) {
3488 error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
3489 sizeof(*header));
3490 if (error != LOAD_SUCCESS) {
3491 goto bad2;
3492 }
3493
3494 /* Look up our architecture in the fat file. */
3495 error = fatfile_getbestarch_for_cputype(cputype, CPU_SUBTYPE_ANY,
3496 (vm_offset_t)(&header->fat_header), sizeof(*header), imgp, &fat_arch);
3497 if (error != LOAD_SUCCESS) {
3498 goto bad2;
3499 }
3500
3501 /* Read the Mach-O header out of it */
3502 error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
3503 sizeof(header->mach_header), fat_arch.offset,
3504 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
3505 if (error) {
3506 error = LOAD_IOERROR;
3507 goto bad2;
3508 }
3509
3510 if (resid) {
3511 error = LOAD_BADMACHO;
3512 goto bad2;
3513 }
3514
3515 /* Is this really a Mach-O? */
3516 if (header->mach_header.magic != MH_MAGIC &&
3517 header->mach_header.magic != MH_MAGIC_64) {
3518 error = LOAD_BADMACHO;
3519 goto bad2;
3520 }
3521
3522 *file_offset = fat_arch.offset;
3523 *macho_size = fat_arch.size;
3524 } else {
3525 /*
3526 * Force get_macho_vnode() to fail if the architecture bits
3527 * do not match the expected architecture bits. This in
3528 * turn causes load_dylinker() to fail for the same reason,
3529 * so it ensures the dynamic linker and the binary are in
3530 * lock-step. This is potentially bad, if we ever add to
3531 * the CPU_ARCH_* bits any bits that are desirable but not
3532 * required, since the dynamic linker might work, but we will
3533 * refuse to load it because of this check.
3534 */
3535 if ((cpu_type_t)header->mach_header.cputype != cputype) {
3536 error = LOAD_BADARCH;
3537 goto bad2;
3538 }
3539
3540 *file_offset = 0;
3541 *macho_size = fsize;
3542 }
3543
3544 *mach_header = header->mach_header;
3545 *vpp = vp;
3546
3547 ubc_setsize(vp, fsize);
3548 return error;
3549
3550 bad2:
3551 (void) VNOP_CLOSE(vp, FREAD, ctx);
3552 bad1:
3553 vnode_put(vp);
3554 return error;
3555 }