apple/xnu.git: bsd/kern/mach_loader.c (xnu-4903.270.47)
/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File: kern/mach_loader.c
 * Author: Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
 *      Started.
 */

#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>
#include <sys/codesign.h>
#include <sys/proc_uuid_policy.h>
#include <sys/reason.h>
#include <sys/kdebug.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>        /* vm_allocate() */
#include <mach/mach_vm.h>       /* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>
#include <machine/exec.h>
#include <machine/pal_routines.h>

#include <kern/ast.h>
#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/page_decrypt.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_protos.h>
#include <IOKit/IOReturn.h>     /* for kIOReturnNotPrivileged */

#include <os/overflow.h>

#if __x86_64__
extern int bootarg_no32exec;    /* bsd_init.c */
#endif

/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t pmap_create(ledger_t ledger, vm_map_size_t size,
    boolean_t is_64bit);

/* XXX should have prototypes in a shared header file */
extern int get_map_nentries(vm_map_t);

extern kern_return_t memory_object_signed(memory_object_control_t control,
    boolean_t is_signed);

/* An empty load_result_t */
static const load_result_t load_result_null = {
    .mach_header = MACH_VM_MIN_ADDRESS,
    .entry_point = MACH_VM_MIN_ADDRESS,
    .user_stack = MACH_VM_MIN_ADDRESS,
    .user_stack_size = 0,
    .user_stack_alloc = MACH_VM_MIN_ADDRESS,
    .user_stack_alloc_size = 0,
    .all_image_info_addr = MACH_VM_MIN_ADDRESS,
    .all_image_info_size = 0,
    .thread_count = 0,
    .unixproc = 0,
    .dynlinker = 0,
    .needs_dynlinker = 0,
    .validentry = 0,
    .using_lcmain = 0,
    .is_64bit_addr = 0,
    .is_64bit_data = 0,
    .custom_stack = 0,
    .csflags = 0,
    .has_pagezero = 0,
    .uuid = { 0 },
    .min_vm_addr = MACH_VM_MAX_ADDRESS,
    .max_vm_addr = MACH_VM_MIN_ADDRESS,
    .cs_end_offset = 0,
    .threadstate = NULL,
    .threadstate_sz = 0
};
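
/*
 * Note: min_vm_addr/max_vm_addr intentionally start out "inverted"
 * (MAX_ADDRESS and MIN_ADDRESS respectively) so that the first segment
 * mapped by load_segment() narrows them to the binary's actual VM range.
 */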

/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_t thread,
    struct mach_header *header,
    off_t file_offset,
    off_t macho_size,
    int depth,
    int64_t slide,
    int64_t dyld_slide,
    load_result_t *result,
    load_result_t *binresult,
    struct image_params *imgp
);

static load_return_t
load_segment(
    struct load_command *lcp,
    uint32_t filetype,
    void *control,
    off_t pager_offset,
    off_t macho_size,
    struct vnode *vp,
    vm_map_t map,
    int64_t slide,
    load_result_t *result
);

static load_return_t
load_uuid(
    struct uuid_command *uulp,
    char *command_end,
    load_result_t *result
);

static load_return_t
load_code_signature(
    struct linkedit_data_command *lcp,
    struct vnode *vp,
    off_t macho_offset,
    off_t macho_size,
    cpu_type_t cputype,
    load_result_t *result,
    struct image_params *imgp);

#if CONFIG_CODE_DECRYPTION
static load_return_t
set_code_unprotect(
    struct encryption_info_command *lcp,
    caddr_t addr,
    vm_map_t map,
    int64_t slide,
    struct vnode *vp,
    off_t macho_offset,
    cpu_type_t cputype,
    cpu_subtype_t cpusubtype);
#endif

static
load_return_t
load_main(
    struct entry_point_command *epc,
    thread_t thread,
    int64_t slide,
    load_result_t *result
);

static load_return_t
load_unixthread(
    struct thread_command *tcp,
    thread_t thread,
    int64_t slide,
    load_result_t *result
);

static load_return_t
load_threadstate(
    thread_t thread,
    uint32_t *ts,
    uint32_t total_size,
    load_result_t *result
);

static load_return_t
load_threadstack(
    thread_t thread,
    uint32_t *ts,
    uint32_t total_size,
    mach_vm_offset_t *user_stack,
    int *customstack,
    load_result_t *result
);

static load_return_t
load_threadentry(
    thread_t thread,
    uint32_t *ts,
    uint32_t total_size,
    mach_vm_offset_t *entry_point
);

static load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    integer_t archbits,
    vm_map_t map,
    thread_t thread,
    int depth,
    int64_t slide,
    load_result_t *result,
    struct image_params *imgp
);

struct macho_data;

static load_return_t
get_macho_vnode(
    const char *path,
    integer_t archbits,
    struct mach_header *mach_header,
    off_t *file_offset,
    off_t *macho_size,
    struct macho_data *macho_data,
    struct vnode **vpp
);

static inline void
widen_segment_command(const struct segment_command *scp32,
    struct segment_command_64 *scp)
{
    scp->cmd = scp32->cmd;
    scp->cmdsize = scp32->cmdsize;
    bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
    scp->vmaddr = scp32->vmaddr;
    scp->vmsize = scp32->vmsize;
    scp->fileoff = scp32->fileoff;
    scp->filesize = scp32->filesize;
    scp->maxprot = scp32->maxprot;
    scp->initprot = scp32->initprot;
    scp->nsects = scp32->nsects;
    scp->flags = scp32->flags;
}

static void
note_all_image_info_section(const struct segment_command_64 *scp,
    boolean_t is64, size_t section_size, const void *sections,
    int64_t slide, load_result_t *result)
{
    const union {
        struct section s32;
        struct section_64 s64;
    } *sectionp;
    unsigned int i;


    if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) {
        return;
    }
    for (i = 0; i < scp->nsects; ++i) {
        sectionp = (const void *)
            ((const char *)sections + section_size * i);
        if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
            sizeof(sectionp->s64.sectname))) {
            result->all_image_info_addr =
                is64 ? sectionp->s64.addr : sectionp->s32.addr;
            result->all_image_info_addr += slide;
            result->all_image_info_size =
                is64 ? sectionp->s64.size : sectionp->s32.size;
            return;
        }
    }
}
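
/*
 * The address/size recorded above are what user-space debuggers later
 * retrieve (e.g. via task_info(TASK_DYLD_INFO)) to locate dyld's image
 * list, which is why only dyld's __DATA,__all_image_info section is of
 * interest here.
 */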

#if __arm64__
/*
 * Allow bypassing some security rules (hard pagezero, no write+execute)
 * in exchange for better binary compatibility for legacy apps built
 * before 16KB-alignment was enforced.
 */
const int fourk_binary_compatibility_unsafe = TRUE;
const int fourk_binary_compatibility_allow_wx = FALSE;
#endif /* __arm64__ */

load_return_t
load_machfile(
    struct image_params *imgp,
    struct mach_header *header,
    thread_t thread,
    vm_map_t *mapp,
    load_result_t *result
)
{
    struct vnode *vp = imgp->ip_vp;
    off_t file_offset = imgp->ip_arch_offset;
    off_t macho_size = imgp->ip_arch_size;
    off_t file_size = imgp->ip_vattr->va_data_size;
    pmap_t pmap = 0; /* protected by create_map */
    vm_map_t map;
    load_result_t myresult;
    load_return_t lret;
    boolean_t enforce_hard_pagezero = TRUE;
    int in_exec = (imgp->ip_flags & IMGPF_EXEC);
    task_t task = current_task();
    proc_t p = current_proc();
    int64_t aslr_page_offset = 0;
    int64_t dyld_aslr_page_offset = 0;
    int64_t aslr_section_size = 0;
    int64_t aslr_section_offset = 0;
    kern_return_t kret;

    if (macho_size > file_size) {
        return LOAD_BADMACHO;
    }

    result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
    result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);

    task_t ledger_task;
    if (imgp->ip_new_thread) {
        ledger_task = get_threadtask(imgp->ip_new_thread);
    } else {
        ledger_task = task;
    }
    pmap = pmap_create(get_task_ledger(ledger_task),
        (vm_map_size_t) 0,
        result->is_64bit_addr);
    map = vm_map_create(pmap,
        0,
        vm_compute_max_offset(result->is_64bit_addr),
        TRUE);

#if defined(__arm64__)
    if (result->is_64bit_addr) {
        /* enforce 16KB alignment of VM map entries */
        vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
    } else {
        vm_map_set_page_shift(map, page_shift_user32);
    }
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
    /* enforce 16KB alignment for watch targets with new ABI */
    vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */

#ifndef CONFIG_ENFORCE_SIGNED_CODE
    /* This turns off faulting for executable pages, which makes it
     * possible to circumvent Code Signing Enforcement. The per-process
     * flag (CS_ENFORCEMENT) is not set yet, but we can use the
     * global flag.
     */
    if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) {
        vm_map_disable_NX(map);
        // TODO: Message Trace or log that this is happening
    }
#endif

    /* Forcibly disallow execution from data pages even if the arch
     * normally permits it. */
    if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) {
        vm_map_disallow_data_exec(map);
    }

    /*
     * Compute a random offset for ASLR, and an independent random offset for dyld.
     */
    if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
        vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
        aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;

        aslr_page_offset = random();
        aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
        aslr_page_offset <<= vm_map_page_shift(map);

        dyld_aslr_page_offset = random();
        dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
        dyld_aslr_page_offset <<= vm_map_page_shift(map);

        aslr_page_offset += aslr_section_offset;
    }
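
    /*
     * Illustrative arithmetic, assuming a 16KB-page map (page shift 14)
     * with N slide pages available: aslr_page_offset becomes
     * (random() % N) << 14, i.e. a page-aligned slide in [0, N * 16KB),
     * to which the section-relative offset chosen above is added.
     */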

    if (!result) {
        result = &myresult;
    }

    *result = load_result_null;

    /*
     * re-set the bitness on the load result since we cleared the load result above.
     */
    result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
    result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);

    lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
        0, aslr_page_offset, dyld_aslr_page_offset, result,
        NULL, imgp);

    if (lret != LOAD_SUCCESS) {
        vm_map_deallocate(map); /* will lose pmap reference too */
        return lret;
    }

#if __x86_64__
    /*
     * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
     */
    if (!result->is_64bit_addr) {
        enforce_hard_pagezero = FALSE;
    }

    /*
     * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
     * to the start address for "anywhere" memory allocations.
     */
#define VM_MAP_HIGH_START_BITS_COUNT 8
#define VM_MAP_HIGH_START_BITS_SHIFT 27
    if (result->is_64bit_addr &&
        (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
        int random_bits;
        vm_map_offset_t high_start;

        random_bits = random();
        random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1;
        high_start = (((vm_map_offset_t)random_bits)
            << VM_MAP_HIGH_START_BITS_SHIFT);
        vm_map_set_high_start(map, high_start);
    }
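    /*
     * With 8 random bits shifted left by 27, high_start ranges over
     * [0, 0xFF << 27], i.e. up to roughly 32GB of additional offset
     * for "anywhere" allocations.
     */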
#endif /* __x86_64__ */

    /*
     * Check to see if the page zero is enforced by the map->min_offset.
     */
    if (enforce_hard_pagezero &&
        (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
#if __arm64__
        if (!result->is_64bit_addr && /* not 64-bit address space */
            !(header->flags & MH_PIE) && /* not PIE */
            (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
            PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
            result->has_pagezero && /* has a "soft" page zero */
            fourk_binary_compatibility_unsafe) {
            /*
             * For backwards compatibility of "4K" apps on
             * a 16K system, do not enforce a hard page zero...
             */
        } else
#endif /* __arm64__ */
        {
            vm_map_deallocate(map); /* will lose pmap reference too */
            return LOAD_BADMACHO;
        }
    }

    vm_commit_pagezero_status(map);

    /*
     * If this is an exec, then we are going to destroy the old
     * task, and it's correct to halt it; if it's spawn, the
     * task is not yet running, and it makes no sense.
     */
    if (in_exec) {
        /*
         * Mark the task as halting and start the other
         * threads towards terminating themselves. Then
         * make sure any threads waiting for a process
         * transition get informed that we are committed to
         * this transition, and then finally complete the
         * task halting (wait for threads and then cleanup
         * task resources).
         *
         * NOTE: task_start_halt() makes sure that no new
         * threads are created in the task during the transition.
         * We need to mark the workqueue as exiting before we
         * wait for threads to terminate (at the end of which
         * we no longer have a prohibition on thread creation).
         *
         * Finally, clean up any lingering workqueue data structures
         * that may have been left behind by the workqueue threads
         * as they exited (and then clean up the work queue itself).
         */
        kret = task_start_halt(task);
        if (kret != KERN_SUCCESS) {
            vm_map_deallocate(map); /* will lose pmap reference too */
            return LOAD_FAILURE;
        }
        proc_transcommit(p, 0);
        workq_mark_exiting(p);
        task_complete_halt(task);
        workq_exit(p);

        /*
         * Roll up accounting info to new task. The roll up is done after
         * task_complete_halt to make sure the thread accounting info is
         * rolled up to current_task.
         */
        task_rollup_accounting_info(get_threadtask(thread), task);
    }
    *mapp = map;

#ifdef CONFIG_32BIT_TELEMETRY
    if (!result->is_64bit_data) {
        /*
         * This may not need to be an AST; we merely need to ensure that
         * we gather telemetry at the point where all of the information
         * that we want has been added to the process.
         */
        task_set_32bit_log_flag(get_threadtask(thread));
        act_set_astbsd(thread);
    }
#endif /* CONFIG_32BIT_TELEMETRY */

    return LOAD_SUCCESS;
}

int macho_printf = 0;
#define MACHO_PRINTF(args)      \
    do {                        \
        if (macho_printf) {     \
            printf args;        \
        }                       \
    } while (0)
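
/*
 * Example (a sketch, assuming macho_printf is flipped to non-zero, e.g.
 * from a kernel debugger or a custom build):
 *
 *     MACHO_PRINTF(("mapping at 0x%llx\n", (uint64_t)addr));
 *
 * expands to a printf when the flag is set; otherwise the macro reduces
 * to a single flag test.
 */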

/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * that is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself. We read the commands section into the
 * kernel buffer, and then parse it in order to parse the mach-o file
 * format load_command segment(s). We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_t thread,
    struct mach_header *header,
    off_t file_offset,
    off_t macho_size,
    int depth,
    int64_t aslr_offset,
    int64_t dyld_aslr_offset,
    load_result_t *result,
    load_result_t *binresult,
    struct image_params *imgp
)
{
    uint32_t ncmds;
    struct load_command *lcp;
    struct dylinker_command *dlp = 0;
    integer_t dlarchbits = 0;
    void *control;
    load_return_t ret = LOAD_SUCCESS;
    void *addr;
    vm_size_t alloc_size, cmds_size;
    size_t offset;
    size_t oldoffset; /* for overflow check */
    int pass;
    proc_t p = current_proc(); /* XXXX */
    int error;
    int resid = 0;
    size_t mach_header_sz = sizeof(struct mach_header);
    boolean_t abi64;
    boolean_t got_code_signatures = FALSE;
    boolean_t found_header_segment = FALSE;
    boolean_t found_xhdr = FALSE;
    int64_t slide = 0;
    boolean_t dyld_no_load_addr = FALSE;
    boolean_t is_dyld = FALSE;
    vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
#if __arm64__
    uint32_t pagezero_end = 0;
    uint32_t executable_end = 0;
    uint32_t writable_start = 0;
    vm_map_size_t effective_page_size;

    effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
#endif /* __arm64__ */

    if (header->magic == MH_MAGIC_64 ||
        header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     * Break infinite recursion
     */
    if (depth > 1) {
        return LOAD_FAILURE;
    }

    depth++;
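
    /*
     * Depth bookkeeping: load_machfile() enters with depth 0 (incremented
     * to 1 here) for the main executable, and load_dylinker() re-enters
     * with depth 1 (incremented to 2) for dyld; hence MH_EXECUTE is only
     * accepted at depth 1 and MH_DYLINKER only at depth 2 below.
     */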

    /*
     * Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
        !grade_binary(header->cputype,
        header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
        return LOAD_BADARCH;
    }

#if __x86_64__
    if (bootarg_no32exec && (header->cputype == CPU_TYPE_X86)) {
        return LOAD_BADARCH_X86;
    }
#endif

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

    switch (header->filetype) {
    case MH_EXECUTE:
        if (depth != 1) {
            return LOAD_FAILURE;
        }
#if CONFIG_EMBEDDED
        if (header->flags & MH_DYLDLINK) {
            /* Check properties of dynamic executables */
            if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
                return LOAD_FAILURE;
            }
            result->needs_dynlinker = TRUE;
        } else {
            /* Check properties of static executables (disallowed except for development) */
#if !(DEVELOPMENT || DEBUG)
            return LOAD_FAILURE;
#endif
        }
#endif /* CONFIG_EMBEDDED */

        break;
    case MH_DYLINKER:
        if (depth != 2) {
            return LOAD_FAILURE;
        }
        is_dyld = TRUE;
        break;

    default:
        return LOAD_FAILURE;
    }

    /*
     * Get the pager for the file.
     */
    control = ubc_getobject(vp, UBC_FLAGS_NONE);

    /* ensure header + sizeofcmds falls within the file */
    if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
        (off_t)cmds_size > macho_size ||
        round_page_overflow(cmds_size, &alloc_size)) {
        return LOAD_BADMACHO;
    }

    /*
     * Map the load commands into kernel memory.
     */
    addr = kalloc(alloc_size);
    if (addr == NULL) {
        return LOAD_NOSPACE;
    }

    error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset,
        UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
    if (error) {
        kfree(addr, alloc_size);
        return LOAD_IOERROR;
    }

    if (resid) {
        /* We must be able to read in as much as the mach_header indicated */
        kfree(addr, alloc_size);
        return LOAD_BADMACHO;
    }

    /*
     * For PIE and dyld, slide everything by the ASLR offset.
     */
    if ((header->flags & MH_PIE) || is_dyld) {
        slide = aslr_offset;
    }

    /*
     * Scan through the commands, processing each one as necessary.
     * We parse in four passes through the headers:
     * 0: determine if TEXT and DATA boundary can be page-aligned
     * 1: thread state, uuid, code signature
     * 2: segments
     * 3: dyld, encryption, check entry point
     */

    boolean_t slide_realign = FALSE;
#if __arm64__
    if (!abi64) {
        slide_realign = TRUE;
    }
#endif

    for (pass = 0; pass <= 3; pass++) {
        if (pass == 0 && !slide_realign && !is_dyld) {
            /* if we don't need to realign the slide or determine dyld's
             * load address, pass 0 can be skipped */
            continue;
        } else if (pass == 1) {
#if __arm64__
            boolean_t is_pie;
            int64_t adjust;

            is_pie = ((header->flags & MH_PIE) != 0);
            if (pagezero_end != 0 &&
                pagezero_end < effective_page_size) {
                /* need at least 1 page for PAGEZERO */
                adjust = effective_page_size;
                MACHO_PRINTF(("pagezero boundary at "
                    "0x%llx; adjust slide from "
                    "0x%llx to 0x%llx%s\n",
                    (uint64_t) pagezero_end,
                    slide,
                    slide + adjust,
                    (is_pie
                    ? ""
                    : " BUT NO PIE ****** :-(")));
                if (is_pie) {
                    slide += adjust;
                    pagezero_end += adjust;
                    executable_end += adjust;
                    writable_start += adjust;
                }
            }
            if (pagezero_end != 0) {
                result->has_pagezero = TRUE;
            }
            if (executable_end == writable_start &&
                (executable_end & effective_page_mask) != 0 &&
                (executable_end & FOURK_PAGE_MASK) == 0) {
                /*
                 * The TEXT/DATA boundary is 4K-aligned but
                 * not page-aligned. Adjust the slide to make
                 * it page-aligned and avoid having a page
                 * with both write and execute permissions.
                 */
                adjust =
                    (effective_page_size -
                    (executable_end & effective_page_mask));
                MACHO_PRINTF(("page-unaligned X-W boundary at "
                    "0x%llx; adjust slide from "
                    "0x%llx to 0x%llx%s\n",
                    (uint64_t) executable_end,
                    slide,
                    slide + adjust,
                    (is_pie
                    ? ""
                    : " BUT NO PIE ****** :-(")));
                if (is_pie) {
                    slide += adjust;
                }
            }
#endif /* __arm64__ */

            if (dyld_no_load_addr && binresult) {
                /*
                 * The dyld Mach-O does not specify a load address. Try to locate
                 * it right after the main binary. If binresult == NULL, load
                 * directly to the given slide.
                 */
                slide = vm_map_round_page(slide + binresult->max_vm_addr, effective_page_mask);
            }
        }

        /*
         * Check that the entry point is contained in an executable segment.
         */
        if ((pass == 3) && (!result->using_lcmain && result->validentry == 0)) {
            thread_state_initialize(thread);
            ret = LOAD_FAILURE;
            break;
        }

        /*
         * Check that some segment maps the start of the mach-o file, which is
         * needed by the dynamic loader to read the mach headers, etc.
         */
        if ((pass == 3) && (found_header_segment == FALSE)) {
            ret = LOAD_BADMACHO;
            break;
        }

        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, we just
         * run off the end of the reserved section by incrementing
         * the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;

        while (ncmds--) {
            /* ensure enough space for a minimal load command */
            if (offset + sizeof(struct load_command) > cmds_size) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents. Invalid
             * values are ones which result in an overflow, or
             * which cannot possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
                lcp->cmdsize < sizeof(struct load_command) ||
                offset > cmds_size) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             */
            switch (lcp->cmd) {
            case LC_SEGMENT: {
                struct segment_command *scp = (struct segment_command *) lcp;
                if (pass == 0) {
                    if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
                        dyld_no_load_addr = TRUE;
                        if (!slide_realign) {
                            /* got what we need, bail early on pass 0 */
                            continue;
                        }
                    }

#if __arm64__
                    assert(!abi64);

                    if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
                        /* PAGEZERO */
                        if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) {
                            ret = LOAD_BADMACHO;
                            break;
                        }
                    }
                    if (scp->initprot & VM_PROT_EXECUTE) {
                        /* TEXT */
                        if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) {
                            ret = LOAD_BADMACHO;
                            break;
                        }
                    }
                    if (scp->initprot & VM_PROT_WRITE) {
                        /* DATA */
                        if (os_add_overflow(scp->vmaddr, slide, &writable_start)) {
                            ret = LOAD_BADMACHO;
                            break;
                        }
                    }
#endif /* __arm64__ */
                    break;
                }

                if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
                    found_xhdr = TRUE;
                }

                if (pass != 2) {
                    break;
                }

                if (abi64) {
                    /*
                     * Having an LC_SEGMENT command for the
                     * wrong ABI is invalid <rdar://problem/11021230>
                     */
                    ret = LOAD_BADMACHO;
                    break;
                }

                ret = load_segment(lcp,
                    header->filetype,
                    control,
                    file_offset,
                    macho_size,
                    vp,
                    map,
                    slide,
                    result);
                if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
                    /* Enforce a single segment mapping offset zero, with R+X
                     * protection. */
                    if (found_header_segment ||
                        ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
                        ret = LOAD_BADMACHO;
                        break;
                    }
                    found_header_segment = TRUE;
                }

                break;
            }
            case LC_SEGMENT_64: {
                struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;

                if (pass == 0) {
                    if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
                        dyld_no_load_addr = TRUE;
                        if (!slide_realign) {
                            /* got what we need, bail early on pass 0 */
                            continue;
                        }
                    }
                }

                if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
                    found_xhdr = TRUE;
                }

                if (pass != 2) {
                    break;
                }

                if (!abi64) {
                    /*
                     * Having an LC_SEGMENT_64 command for the
                     * wrong ABI is invalid <rdar://problem/11021230>
                     */
                    ret = LOAD_BADMACHO;
                    break;
                }

                ret = load_segment(lcp,
                    header->filetype,
                    control,
                    file_offset,
                    macho_size,
                    vp,
                    map,
                    slide,
                    result);

                if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
                    /* Enforce a single segment mapping offset zero, with R+X
                     * protection. */
                    if (found_header_segment ||
                        ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
                        ret = LOAD_BADMACHO;
                        break;
                    }
                    found_header_segment = TRUE;
                }

                break;
            }
            case LC_UNIXTHREAD:
                if (pass != 1) {
                    break;
                }
                ret = load_unixthread(
                    (struct thread_command *) lcp,
                    thread,
                    slide,
                    result);
                break;
            case LC_MAIN:
                if (pass != 1) {
                    break;
                }
                if (depth != 1) {
                    break;
                }
                ret = load_main(
                    (struct entry_point_command *) lcp,
                    thread,
                    slide,
                    result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 3) {
                    break;
                }
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_UUID:
                if (pass == 1 && depth == 1) {
                    ret = load_uuid((struct uuid_command *) lcp,
                        (char *)addr + cmds_size,
                        result);
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 1) {
                    break;
                }
                /* pager -> uip ->
                 * load signatures & store in uip
                 * set VM object "signed_pages"
                 */
                ret = load_code_signature(
                    (struct linkedit_data_command *) lcp,
                    vp,
                    file_offset,
                    macho_size,
                    header->cputype,
                    result,
                    imgp);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                        "for file \"%s\"\n",
                        p->p_pid, ret, vp->v_name);
                    /*
                     * Allow injections to be ignored on devices w/o enforcement enabled
                     */
                    if (!cs_process_global_enforcement()) {
                        ret = LOAD_SUCCESS; /* ignore error */
                    }
                } else {
                    got_code_signatures = TRUE;
                }

                if (got_code_signatures) {
                    unsigned tainted = CS_VALIDATE_TAINTED;
                    boolean_t valid = FALSE;
                    vm_size_t off = 0;


                    if (cs_debug > 10) {
                        printf("validating initial pages of %s\n", vp->v_name);
                    }

                    while (off < alloc_size && ret == LOAD_SUCCESS) {
                        tainted = CS_VALIDATE_TAINTED;

                        valid = cs_validate_range(vp,
                            NULL,
                            file_offset + off,
                            addr + off,
                            PAGE_SIZE,
                            &tainted);
                        if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
                            if (cs_debug) {
                                printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
                                    vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
                            }
                            if (cs_process_global_enforcement() ||
                                (result->csflags & (CS_HARD | CS_KILL | CS_ENFORCEMENT))) {
                                ret = LOAD_FAILURE;
                            }
                            result->csflags &= ~CS_VALID;
                        }
                        off += PAGE_SIZE;
                    }
                }

                break;
#if CONFIG_CODE_DECRYPTION
            case LC_ENCRYPTION_INFO:
            case LC_ENCRYPTION_INFO_64:
                if (pass != 3) {
                    break;
                }
                ret = set_code_unprotect(
                    (struct encryption_info_command *) lcp,
                    addr, map, slide, vp, file_offset,
                    header->cputype, header->cpusubtype);
                if (ret != LOAD_SUCCESS) {
                    os_reason_t load_failure_reason = OS_REASON_NULL;
                    printf("proc %d: set_code_unprotect() error %d "
                        "for file \"%s\"\n",
                        p->p_pid, ret, vp->v_name);
                    /*
                     * Don't let the app run if it's
                     * encrypted but we failed to set up the
                     * decrypter. If the keys are missing it will
                     * return LOAD_DECRYPTFAIL.
                     */
                    if (ret == LOAD_DECRYPTFAIL) {
                        /* failed to load due to missing FP keys */
                        proc_lock(p);
                        p->p_lflag |= P_LTERM_DECRYPTFAIL;
                        proc_unlock(p);

                        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
                            p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
                        load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
                    } else {
                        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
                            p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
                        load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
                    }

                    assert(load_failure_reason != OS_REASON_NULL);
                    psignal_with_reason(p, SIGKILL, load_failure_reason);
                }
                break;
#endif
#if __arm64__
            case LC_VERSION_MIN_IPHONEOS: {
                struct version_min_command *vmc;

                if (pass != 1) {
                    break;
                }
                vmc = (struct version_min_command *) lcp;
                if (vmc->sdk < (12 << 16)) {
                    /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
                    result->legacy_footprint = TRUE;
                }
                // printf("FBDP %s:%d vp %p (%s) sdk %d.%d.%d -> legacy_footprint=%d\n", __FUNCTION__, __LINE__, vp, vp->v_name, (vmc->sdk >> 16), ((vmc->sdk & 0xFF00) >> 8), (vmc->sdk & 0xFF), result->legacy_footprint);
                break;
            }
#endif /* __arm64__ */
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS) {
                break;
            }
        }
        if (ret != LOAD_SUCCESS) {
            break;
        }
    }

    if (ret == LOAD_SUCCESS) {
        if (!got_code_signatures && cs_process_global_enforcement()) {
            ret = LOAD_FAILURE;
        }

        /* Make sure if we need dyld, we got it */
        if (result->needs_dynlinker && !dlp) {
            ret = LOAD_FAILURE;
        }

        if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
            /*
             * load the dylinker, and slide it by the independent DYLD ASLR
             * offset regardless of the PIE-ness of the main binary.
             */
            ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
                dyld_aslr_offset, result, imgp);
        }

        if ((ret == LOAD_SUCCESS) && (depth == 1)) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            }
#if CONFIG_ENFORCE_SIGNED_CODE
            if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
                ret = LOAD_FAILURE;
            }
#endif
        }
    }

    if (ret == LOAD_BADMACHO && found_xhdr) {
        ret = LOAD_BADMACHO_UPX;
    }

    kfree(addr, alloc_size);

    return ret;
}

#if CONFIG_CODE_DECRYPTION

#define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
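
/*
 * 3 * 4096 = 12KB: the first three 4KB pages of the slice are left in
 * the clear, presumably so the Mach-O headers stay readable without the
 * transform (see the comment in unprotect_dsmos_segment() below).
 */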

static load_return_t
unprotect_dsmos_segment(
    uint64_t file_off,
    uint64_t file_size,
    struct vnode *vp,
    off_t macho_offset,
    vm_map_t map,
    vm_map_offset_t map_addr,
    vm_map_size_t map_size)
{
    kern_return_t kr;

    /*
     * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
     * this part of a Universal binary) are not protected...
     * The rest needs to be "transformed".
     */
    if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
        file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
        /* it's all unprotected, nothing to do... */
        kr = KERN_SUCCESS;
    } else {
        if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
            /*
             * We start mapping in the unprotected area.
             * Skip the unprotected part...
             */
            vm_map_offset_t delta;

            delta = APPLE_UNPROTECTED_HEADER_SIZE;
            delta -= file_off;
            map_addr += delta;
            map_size -= delta;
        }
        /* ... transform the rest of the mapping. */
        struct pager_crypt_info crypt_info;
        crypt_info.page_decrypt = dsmos_page_transform;
        crypt_info.crypt_ops = NULL;
        crypt_info.crypt_end = NULL;
#pragma unused(vp, macho_offset)
        crypt_info.crypt_ops = (void *)0x2e69cf40;
        vm_map_offset_t crypto_backing_offset;
        crypto_backing_offset = -1; /* i.e. use map entry's offset */
#if VM_MAP_DEBUG_APPLE_PROTECT
        if (vm_map_debug_apple_protect) {
            struct proc *p;
            p = current_proc();
            printf("APPLE_PROTECT: %d[%s] map %p "
                "[0x%llx:0x%llx] %s(%s)\n",
                p->p_pid, p->p_comm, map,
                (uint64_t) map_addr,
                (uint64_t) (map_addr + map_size),
                __FUNCTION__, vp->v_name);
        }
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */

        /* The DSMOS pager can only be used by apple signed code */
        struct cs_blob *blob = csvnode_get_blob(vp, file_off);
        if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
            return LOAD_FAILURE;
        }

        kr = vm_map_apple_protected(map,
            map_addr,
            map_addr + map_size,
            crypto_backing_offset,
            &crypt_info);
    }

    if (kr != KERN_SUCCESS) {
        return LOAD_FAILURE;
    }
    return LOAD_SUCCESS;
}
#else /* CONFIG_CODE_DECRYPTION */
static load_return_t
unprotect_dsmos_segment(
    __unused uint64_t file_off,
    __unused uint64_t file_size,
    __unused struct vnode *vp,
    __unused off_t macho_offset,
    __unused vm_map_t map,
    __unused vm_map_offset_t map_addr,
    __unused vm_map_size_t map_size)
{
    return LOAD_SUCCESS;
}
#endif /* CONFIG_CODE_DECRYPTION */


/*
 * map_segment:
 * Maps a Mach-O segment, taking care of mis-alignment (wrt the system
 * page size) issues.
 *
 * The mapping might result in 1, 2 or 3 map entries:
 * 1. for the first page, which could overlap with the previous
 *    mapping,
 * 2. for the center (if applicable),
 * 3. for the last page, which could overlap with the next mapping.
 *
 * For each of those map entries, we might have to interpose a
 * "fourk_pager" to deal with mis-alignment wrt the system page size,
 * either in the mapping address and/or size or the file offset and/or
 * size.
 * The "fourk_pager" itself would be mapped with proper alignment
 * wrt the system page size and would then be populated with the
 * information about the intended mapping, with a "4KB" granularity.
 */
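/*
 * Illustrative layout for a mis-aligned mapping (e.g. a 4K-aligned
 * segment on a 16K-page map):
 *
 *   vm_start                                                      vm_end
 *   |- 1st page (4K pager) -|------ middle ------|- last page (4K pager) -|
 *       unaligned head          page-aligned           unaligned tail
 */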
static kern_return_t
map_segment(
    vm_map_t map,
    vm_map_offset_t vm_start,
    vm_map_offset_t vm_end,
    memory_object_control_t control,
    vm_map_offset_t file_start,
    vm_map_offset_t file_end,
    vm_prot_t initprot,
    vm_prot_t maxprot,
    load_result_t *result)
{
    vm_map_offset_t cur_offset, cur_start, cur_end;
    kern_return_t ret;
    vm_map_offset_t effective_page_mask;
    vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;

    if (vm_end < vm_start ||
        file_end < file_start) {
        return LOAD_BADMACHO;
    }
    if (vm_end == vm_start ||
        file_end == file_start) {
        /* nothing to map... */
        return LOAD_SUCCESS;
    }

    effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));

    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    if (vm_map_page_aligned(vm_start, effective_page_mask) &&
        vm_map_page_aligned(vm_end, effective_page_mask) &&
        vm_map_page_aligned(file_start, effective_page_mask) &&
        vm_map_page_aligned(file_end, effective_page_mask)) {
        /* all page-aligned and map-aligned: proceed */
    } else {
#if __arm64__
        /* use an intermediate "4K" pager */
        vmk_flags.vmkf_fourk = TRUE;
#else /* __arm64__ */
        panic("map_segment: unexpected mis-alignment "
            "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
            (uint64_t) vm_start,
            (uint64_t) vm_end,
            (uint64_t) file_start,
            (uint64_t) file_end);
#endif /* __arm64__ */
    }

    cur_offset = 0;
    cur_start = vm_start;
    cur_end = vm_start;
#if __arm64__
    if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
        /* one 4K pager for the 1st page */
        cur_end = vm_map_round_page(cur_start, effective_page_mask);
        if (cur_end > vm_end) {
            cur_end = vm_start + (file_end - file_start);
        }
        if (control != MEMORY_OBJECT_CONTROL_NULL) {
            ret = vm_map_enter_mem_object_control(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                control,
                file_start + cur_offset,
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        } else {
            ret = vm_map_enter_mem_object(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                IPC_PORT_NULL,
                0, /* offset */
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        }
        if (ret != KERN_SUCCESS) {
            return LOAD_NOSPACE;
        }
        cur_offset += cur_end - cur_start;
    }
#endif /* __arm64__ */
    if (cur_end >= vm_start + (file_end - file_start)) {
        /* all mapped: done */
        goto done;
    }
    if (vm_map_round_page(cur_end, effective_page_mask) >=
        vm_map_trunc_page(vm_start + (file_end - file_start),
        effective_page_mask)) {
        /* no middle */
    } else {
        cur_start = cur_end;
        if ((vm_start & effective_page_mask) !=
            (file_start & effective_page_mask)) {
            /* one 4K pager for the middle */
            cur_vmk_flags = vmk_flags;
        } else {
            /* regular mapping for the middle */
            cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        }

#if CONFIG_EMBEDDED
        (void) result;
#else /* CONFIG_EMBEDDED */
        /*
         * This process doesn't have its new csflags (from
         * the image being loaded) yet, so tell VM to override the
         * current process's CS_ENFORCEMENT for this mapping.
         */
        if (result->csflags & CS_ENFORCEMENT) {
            cur_vmk_flags.vmkf_cs_enforcement = TRUE;
        } else {
            cur_vmk_flags.vmkf_cs_enforcement = FALSE;
        }
        cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
#endif /* CONFIG_EMBEDDED */

        cur_end = vm_map_trunc_page(vm_start + (file_end -
            file_start),
            effective_page_mask);
        if (control != MEMORY_OBJECT_CONTROL_NULL) {
            ret = vm_map_enter_mem_object_control(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                cur_vmk_flags,
                VM_KERN_MEMORY_NONE,
                control,
                file_start + cur_offset,
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        } else {
            ret = vm_map_enter_mem_object(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                cur_vmk_flags,
                VM_KERN_MEMORY_NONE,
                IPC_PORT_NULL,
                0, /* offset */
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        }
        if (ret != KERN_SUCCESS) {
            return LOAD_NOSPACE;
        }
        cur_offset += cur_end - cur_start;
    }
    if (cur_end >= vm_start + (file_end - file_start)) {
        /* all mapped: done */
        goto done;
    }
    cur_start = cur_end;
#if __arm64__
    if (!vm_map_page_aligned(vm_start + (file_end - file_start),
        effective_page_mask)) {
        /* one 4K pager for the last page */
        cur_end = vm_start + (file_end - file_start);
        if (control != MEMORY_OBJECT_CONTROL_NULL) {
            ret = vm_map_enter_mem_object_control(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                control,
                file_start + cur_offset,
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        } else {
            ret = vm_map_enter_mem_object(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                IPC_PORT_NULL,
                0, /* offset */
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        }
        if (ret != KERN_SUCCESS) {
            return LOAD_NOSPACE;
        }
        cur_offset += cur_end - cur_start;
    }
#endif /* __arm64__ */
done:
    assert(cur_end >= vm_start + (file_end - file_start));
    return LOAD_SUCCESS;
}

static
load_return_t
load_segment(
    struct load_command *lcp,
    uint32_t filetype,
    void *control,
    off_t pager_offset,
    off_t macho_size,
    struct vnode *vp,
    vm_map_t map,
    int64_t slide,
    load_result_t *result)
{
    struct segment_command_64 segment_command, *scp;
    kern_return_t ret;
    vm_map_size_t delta_size;
    vm_prot_t initprot;
    vm_prot_t maxprot;
    size_t segment_command_size, total_section_size,
        single_section_size;
    vm_map_offset_t file_offset, file_size;
    vm_map_offset_t vm_offset, vm_size;
    vm_map_offset_t vm_start, vm_end, vm_end_aligned;
    vm_map_offset_t file_start, file_end;
    kern_return_t kr;
    boolean_t verbose;
    vm_map_size_t effective_page_size;
    vm_map_offset_t effective_page_mask;
#if __arm64__
    vm_map_kernel_flags_t vmk_flags;
    boolean_t fourk_align;
#endif /* __arm64__ */

    effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
    effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));

    verbose = FALSE;
    if (LC_SEGMENT_64 == lcp->cmd) {
        segment_command_size = sizeof(struct segment_command_64);
        single_section_size = sizeof(struct section_64);
#if __arm64__
        /* 64-bit binary: should already be 16K-aligned */
        fourk_align = FALSE;
#endif /* __arm64__ */
    } else {
        segment_command_size = sizeof(struct segment_command);
        single_section_size = sizeof(struct section);
#if __arm64__
        /* 32-bit binary: might need 4K-alignment */
        if (effective_page_size != FOURK_PAGE_SIZE) {
            /* not using 4K page size: need fourk_pager */
            fourk_align = TRUE;
            verbose = TRUE;
        } else {
            /* using 4K page size: no need for re-alignment */
            fourk_align = FALSE;
        }
#endif /* __arm64__ */
    }
    if (lcp->cmdsize < segment_command_size) {
        return LOAD_BADMACHO;
    }
    total_section_size = lcp->cmdsize - segment_command_size;

    if (LC_SEGMENT_64 == lcp->cmd) {
        scp = (struct segment_command_64 *)lcp;
    } else {
        scp = &segment_command;
        widen_segment_command((struct segment_command *)lcp, scp);
    }

    if (verbose) {
        MACHO_PRINTF(("+++ load_segment %s "
            "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
            "prot %d/%d flags 0x%x\n",
            scp->segname,
            (uint64_t)(slide + scp->vmaddr),
            (uint64_t)(slide + scp->vmaddr + scp->vmsize),
            pager_offset + scp->fileoff,
            pager_offset + scp->fileoff + scp->filesize,
            scp->initprot,
            scp->maxprot,
            scp->flags));
    }

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp->fileoff + scp->filesize < scp->fileoff ||
        scp->fileoff + scp->filesize > (uint64_t)macho_size) {
        return LOAD_BADMACHO;
    }
    /*
     * Ensure that the number of sections specified would fit
     * within the load command size.
     */
    if (total_section_size / single_section_size < scp->nsects) {
        return LOAD_BADMACHO;
    }
    /*
     * Make sure the segment is page-aligned in the file.
     */
    file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
    file_size = scp->filesize;
#if __arm64__
    if (fourk_align) {
        if ((file_offset & FOURK_PAGE_MASK) != 0) {
            /*
             * we can't mmap() it if it's not at least 4KB-aligned
             * in the file
             */
            return LOAD_BADMACHO;
        }
    } else
#endif /* __arm64__ */
    if ((file_offset & PAGE_MASK_64) != 0 ||
        /* we can't mmap() it if it's not page-aligned in the file */
        (file_offset & vm_map_page_mask(map)) != 0) {
        /*
         * The 1st test would have failed if the system's page size
         * was what this process believes is the page size, so let's
         * fail here too for the sake of consistency.
         */
        return LOAD_BADMACHO;
    }

    /*
     * If we have a code signature attached for this slice
     * require that the segments are within the signed part
     * of the file.
     */
    if (result->cs_end_offset &&
        result->cs_end_offset < (off_t)scp->fileoff &&
        result->cs_end_offset - scp->fileoff < scp->filesize) {
        if (cs_debug) {
            printf("section outside code signature\n");
        }
        return LOAD_BADMACHO;
    }

    vm_offset = scp->vmaddr + slide;
    vm_size = scp->vmsize;

    if (vm_size == 0) {
        return LOAD_SUCCESS;
    }
    if (scp->vmaddr == 0 &&
        file_size == 0 &&
        vm_size != 0 &&
        (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
        (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
        /*
         * For PIE, extend page zero rather than moving it. Extending
         * page zero keeps early allocations from falling predictably
         * between the end of page zero and the beginning of the first
         * slid segment.
         */
        /*
         * This is a "page zero" segment: it starts at address 0,
         * is not mapped from the binary file and is not accessible.
         * User-space should never be able to access that memory, so
         * make it completely off limits by raising the VM map's
         * minimum offset.
         */
        vm_end = vm_offset + vm_size;
        if (vm_end < vm_offset) {
            return LOAD_BADMACHO;
        }
        if (verbose) {
            MACHO_PRINTF(("++++++ load_segment: "
                "page_zero up to 0x%llx\n",
                (uint64_t) vm_end));
        }
#if __arm64__
        if (fourk_align) {
            /* raise min_offset as much as page-alignment allows */
            vm_end_aligned = vm_map_trunc_page(vm_end,
                effective_page_mask);
        } else
#endif /* __arm64__ */
        {
            vm_end = vm_map_round_page(vm_end,
                PAGE_MASK_64);
            vm_end_aligned = vm_end;
        }
        ret = vm_map_raise_min_offset(map,
            vm_end_aligned);
#if __arm64__
        if (ret == 0 &&
            vm_end > vm_end_aligned) {
            /* use fourk_pager to map the rest of pagezero */
            assert(fourk_align);
            vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
            vmk_flags.vmkf_fourk = TRUE;
            ret = vm_map_enter_mem_object(
                map,
                &vm_end_aligned,
                vm_end - vm_end_aligned,
                (mach_vm_offset_t) 0, /* mask */
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                IPC_PORT_NULL,
                0,
                FALSE, /* copy */
                (scp->initprot & VM_PROT_ALL),
                (scp->maxprot & VM_PROT_ALL),
                VM_INHERIT_DEFAULT);
        }
#endif /* __arm64__ */

        if (ret != KERN_SUCCESS) {
            return LOAD_FAILURE;
        }
        return LOAD_SUCCESS;
    } else {
#if CONFIG_EMBEDDED
        /* not PAGEZERO: should not be mapped at address 0 */
        if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
            return LOAD_BADMACHO;
        }
#endif /* CONFIG_EMBEDDED */
    }

#if __arm64__
    if (fourk_align) {
        /* 4K-align */
        file_start = vm_map_trunc_page(file_offset,
            FOURK_PAGE_MASK);
        file_end = vm_map_round_page(file_offset + file_size,
            FOURK_PAGE_MASK);
        vm_start = vm_map_trunc_page(vm_offset,
            FOURK_PAGE_MASK);
        vm_end = vm_map_round_page(vm_offset + vm_size,
            FOURK_PAGE_MASK);
        if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
            page_aligned(file_start) &&
            vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
            page_aligned(vm_start) &&
            vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
            /* XXX last segment: ignore mis-aligned tail */
            file_end = vm_map_round_page(file_end,
                effective_page_mask);
            vm_end = vm_map_round_page(vm_end,
                effective_page_mask);
        }
    } else
#endif /* __arm64__ */
    {
        file_start = vm_map_trunc_page(file_offset,
            effective_page_mask);
        file_end = vm_map_round_page(file_offset + file_size,
            effective_page_mask);
        vm_start = vm_map_trunc_page(vm_offset,
            effective_page_mask);
        vm_end = vm_map_round_page(vm_offset + vm_size,
            effective_page_mask);
    }

    if (vm_start < result->min_vm_addr) {
        result->min_vm_addr = vm_start;
    }
    if (vm_end > result->max_vm_addr) {
        result->max_vm_addr = vm_end;
    }

    if (map == VM_MAP_NULL) {
        return LOAD_SUCCESS;
    }

    if (vm_size > 0) {
        initprot = (scp->initprot) & VM_PROT_ALL;
        maxprot = (scp->maxprot) & VM_PROT_ALL;
        /*
         * Map a copy of the file into the address space.
         */
        if (verbose) {
            MACHO_PRINTF(("++++++ load_segment: "
                "mapping at vm [0x%llx:0x%llx] of "
                "file [0x%llx:0x%llx]\n",
                (uint64_t) vm_start,
                (uint64_t) vm_end,
                (uint64_t) file_start,
                (uint64_t) file_end));
        }
        ret = map_segment(map,
            vm_start,
            vm_end,
            control,
            file_start,
            file_end,
            initprot,
            maxprot,
            result);
        if (ret) {
            return LOAD_NOSPACE;
        }

#if FIXME
        /*
         * If the file didn't end on a page boundary,
         * we need to zero the leftover.
         */
        delta_size = map_size - scp->filesize;
        if (delta_size > 0) {
            mach_vm_offset_t tmp;

            ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD);
            if (ret != KERN_SUCCESS) {
                return LOAD_RESOURCE;
            }

            if (copyout(tmp, map_addr + scp->filesize,
                delta_size)) {
                (void) mach_vm_deallocate(
                    kernel_map, tmp, delta_size);
                return LOAD_FAILURE;
            }

            (void) mach_vm_deallocate(kernel_map, tmp, delta_size);
        }
#endif /* FIXME */
    }

    /*
     * If the virtual size of the segment is greater
     * than the size from the file, we need to allocate
     * zero fill memory for the rest.
     */
    if ((vm_end - vm_start) > (file_end - file_start)) {
        delta_size = (vm_end - vm_start) - (file_end - file_start);
    } else {
        delta_size = 0;
    }
    if (delta_size > 0) {
        mach_vm_offset_t tmp;

        tmp = vm_start + (file_end - file_start);
        if (verbose) {
            MACHO_PRINTF(("++++++ load_segment: "
                "delta mapping vm [0x%llx:0x%llx]\n",
                (uint64_t) tmp,
                (uint64_t) (tmp + delta_size)));
        }
        kr = map_segment(map,
            tmp,
            tmp + delta_size,
            MEMORY_OBJECT_CONTROL_NULL,
            0,
            delta_size,
            scp->initprot,
            scp->maxprot,
            result);
        if (kr != KERN_SUCCESS) {
            return LOAD_NOSPACE;
        }
    }

    if ((scp->fileoff == 0) && (scp->filesize != 0)) {
        result->mach_header = vm_offset;
    }

    if (scp->flags & SG_PROTECTED_VERSION_1) {
        ret = unprotect_dsmos_segment(file_start,
            file_end - file_start,
            vp,
            pager_offset,
            map,
            vm_start,
            vm_end - vm_start);
        if (ret != LOAD_SUCCESS) {
            return ret;
        }
    } else {
        ret = LOAD_SUCCESS;
    }

    if (LOAD_SUCCESS == ret &&
        filetype == MH_DYLINKER &&
        result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
        note_all_image_info_section(scp,
            LC_SEGMENT_64 == lcp->cmd,
            single_section_size,
            ((const char *)lcp +
            segment_command_size),
            slide,
            result);
    }

    if (result->entry_point != MACH_VM_MIN_ADDRESS) {
        if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
            if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) {
                result->validentry = 1;
            } else {
                /* right range but wrong protections, unset if previously validated */
                result->validentry = 0;
            }
        }
    }

    return ret;
}

static
load_return_t
load_uuid(
    struct uuid_command *uulp,
    char *command_end,
    load_result_t *result
)
{
    /*
     * We need to check the following for this command:
     * - The command size should be at least the size of struct uuid_command
     * - The UUID part of the command should be completely within the mach-o header
     */

    if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
        (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
        return LOAD_BADMACHO;
    }

    memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
    return LOAD_SUCCESS;
}

static
load_return_t
load_main(
    struct entry_point_command *epc,
    thread_t thread,
    int64_t slide,
    load_result_t *result
)
{
    mach_vm_offset_t addr;
    kern_return_t ret;

    if (epc->cmdsize < sizeof(*epc)) {
        return LOAD_BADMACHO;
    }
    if (result->thread_count != 0) {
        return LOAD_FAILURE;
    }

    if (thread == THREAD_NULL) {
        return LOAD_SUCCESS;
    }

    /*
     * LC_MAIN specifies stack size but not location.
     * Add guard page to allocation size (MAXSSIZ includes guard page).
     */
    if (epc->stacksize) {
        if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) {
            /*
             * We are going to immediately throw away this result, but we
             * want to make sure we aren't loading a value dangerously
             * close to overflowing, since it will have a guard page added
             * to it and be rounded to page boundaries.
             */
            return LOAD_BADMACHO;
        }
        result->user_stack_size = epc->stacksize;
        if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
            return LOAD_BADMACHO;
        }
2017 result->custom_stack = TRUE;
2018 } else {
2019 result->user_stack_alloc_size = MAXSSIZ;
2020 }
2021
2022 /* use default location for stack */
2023 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2024 if (ret != KERN_SUCCESS) {
2025 return LOAD_FAILURE;
2026 }
2027
2028 /* The stack slides down from the default location */
2029 result->user_stack = addr;
2030 result->user_stack -= slide;
2031
2032 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2033 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2034 return LOAD_FAILURE;
2035 }
2036
2037 /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
2038 result->needs_dynlinker = TRUE;
2039 result->using_lcmain = TRUE;
2040
2041 ret = thread_state_initialize(thread);
2042 if (ret != KERN_SUCCESS) {
2043 return LOAD_FAILURE;
2044 }
2045
2046 result->unixproc = TRUE;
2047 result->thread_count++;
2048
2049 return LOAD_SUCCESS;
2050 }
2051
2052
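/*
 * load_unixthread() handles an LC_UNIXTHREAD command, which supplies
 * a full register state instead of an entry-point offset: the stack
 * (optionally custom) and the entry point are pulled out of that
 * state, and the state itself is validated and saved for activation.
 */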
2053 static
2054 load_return_t
2055 load_unixthread(
2056 struct thread_command *tcp,
2057 thread_t thread,
2058 int64_t slide,
2059 load_result_t *result
2060 )
2061 {
2062 load_return_t ret;
2063 int customstack = 0;
2064 mach_vm_offset_t addr;
2065 if (tcp->cmdsize < sizeof(*tcp)) {
2066 return LOAD_BADMACHO;
2067 }
2068 if (result->thread_count != 0) {
2069 return LOAD_FAILURE;
2070 }
2071
2072 if (thread == THREAD_NULL) {
2073 return LOAD_SUCCESS;
2074 }
2075
2076 ret = load_threadstack(thread,
2077 (uint32_t *)(((vm_offset_t)tcp) +
2078 sizeof(struct thread_command)),
2079 tcp->cmdsize - sizeof(struct thread_command),
2080 &addr, &customstack, result);
2081 if (ret != LOAD_SUCCESS) {
2082 return ret;
2083 }
2084
2085 /* LC_UNIXTHREAD optionally specifies stack size and location */
2086
2087 if (customstack) {
2088 result->custom_stack = TRUE;
2089 } else {
2090 result->user_stack_alloc_size = MAXSSIZ;
2091 }
2092
2093 /* The stack slides down from the default location */
2094 result->user_stack = addr;
2095 result->user_stack -= slide;
2096
2097 ret = load_threadentry(thread,
2098 (uint32_t *)(((vm_offset_t)tcp) +
2099 sizeof(struct thread_command)),
2100 tcp->cmdsize - sizeof(struct thread_command),
2101 &addr);
2102 if (ret != LOAD_SUCCESS) {
2103 return ret;
2104 }
2105
2106 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2107 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2108 return LOAD_FAILURE;
2109 }
2110
2111 result->entry_point = addr;
2112 result->entry_point += slide;
2113
2114 ret = load_threadstate(thread,
2115 (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
2116 tcp->cmdsize - sizeof(struct thread_command),
2117 result);
2118 if (ret != LOAD_SUCCESS) {
2119 return ret;
2120 }
2121
2122 result->unixproc = TRUE;
2123 result->thread_count++;
2124
2125 return LOAD_SUCCESS;
2126 }
2127
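/*
 * load_threadstate() copies the thread state area out of the mach-o
 * header and walks it to validate the (flavor, count) framing. The
 * area is a sequence of tuples:
 *
 *	uint32_t flavor;         machine-specific state flavor
 *	uint32_t count;          size of the state, in 32-bit words
 *	uint32_t state[count];   the register values themselves
 *
 * The copy is kept in result->threadstate for activation time; the
 * register values are not validated here (see the XXX below).
 */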
2128 static
2129 load_return_t
2130 load_threadstate(
2131 thread_t thread,
2132 uint32_t *ts,
2133 uint32_t total_size,
2134 load_result_t *result
2135 )
2136 {
2137 uint32_t size;
2138 int flavor;
2139 uint32_t thread_size;
2140 uint32_t *local_ts = NULL;
2141 uint32_t local_ts_size = 0;
2142 int ret;
2143
2144 (void)thread;
2145
2146 if (total_size > 0) {
2147 local_ts_size = total_size;
2148 local_ts = kalloc(local_ts_size);
2149 if (local_ts == NULL) {
2150 return LOAD_FAILURE;
2151 }
2152 memcpy(local_ts, ts, local_ts_size);
2153 ts = local_ts;
2154 }
2155
2156 /*
2157 * Validate the new thread state; iterate through the state flavors in
2158 * the Mach-O file.
2159 * XXX: we should validate the machine state here, to avoid failing at
2160 * activation time where we can't bail out cleanly.
2161 */
2162 while (total_size > 0) {
2163 flavor = *ts++;
2164 size = *ts++;
2165
2166 if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
2167 os_sub_overflow(total_size, thread_size, &total_size)) {
2168 ret = LOAD_BADMACHO;
2169 goto bad;
2170 }
2171
2172 ts += size; /* ts is a (uint32_t *) */
2173 }
2174
2175 result->threadstate = local_ts;
2176 result->threadstate_sz = local_ts_size;
2177 return LOAD_SUCCESS;
2178
2179 bad:
2180 if (local_ts) {
2181 kfree(local_ts, local_ts_size);
2182 }
2183 return ret;
2184 }
2185
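/*
 * load_threadstack() walks the same (flavor, count, state) tuples and
 * has thread_userstack() extract the stack pointer from each register
 * state, reporting whether the binary supplied a custom stack.
 */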
2186 static
2187 load_return_t
2188 load_threadstack(
2189 thread_t thread,
2190 uint32_t *ts,
2191 uint32_t total_size,
2192 mach_vm_offset_t *user_stack,
2193 int *customstack,
2194 load_result_t *result
2195 )
2196 {
2197 kern_return_t ret;
2198 uint32_t size;
2199 int flavor;
2200 uint32_t stack_size;
2201
2202 while (total_size > 0) {
2203 flavor = *ts++;
2204 size = *ts++;
2205 if (UINT32_MAX - 2 < size ||
2206 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2207 return LOAD_BADMACHO;
2208 }
2209 stack_size = (size + 2) * sizeof(uint32_t);
2210 if (stack_size > total_size) {
2211 return LOAD_BADMACHO;
2212 }
2213 total_size -= stack_size;
2214
2215 /*
2216 * Third argument is a kernel space pointer; it gets cast
2217 * to the appropriate type in thread_userstack() based on
2218 * the value of flavor.
2219 */
2220 ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
2221 if (ret != KERN_SUCCESS) {
2222 return LOAD_FAILURE;
2223 }
2224 ts += size; /* ts is a (uint32_t *) */
2225 }
2226 return LOAD_SUCCESS;
2227 }
2228
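/*
 * load_threadentry() performs the matching walk for the entry point,
 * letting thread_entrypoint() pull the program counter out of each
 * register state it understands.
 */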
2229 static
2230 load_return_t
2231 load_threadentry(
2232 thread_t thread,
2233 uint32_t *ts,
2234 uint32_t total_size,
2235 mach_vm_offset_t *entry_point
2236 )
2237 {
2238 kern_return_t ret;
2239 uint32_t size;
2240 int flavor;
2241 uint32_t entry_size;
2242
2243 /*
2244 * Extract the entry point from the thread state.
2245 */
2246 *entry_point = MACH_VM_MIN_ADDRESS;
2247 while (total_size > 0) {
2248 flavor = *ts++;
2249 size = *ts++;
2250 if (UINT32_MAX - 2 < size ||
2251 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2252 return LOAD_BADMACHO;
2253 }
2254 entry_size = (size + 2) * sizeof(uint32_t);
2255 if (entry_size > total_size) {
2256 return LOAD_BADMACHO;
2257 }
2258 total_size -= entry_size;
2259 /*
2260 * Third argument is a kernel space pointer; it gets cast
2261 * to the appropriate type in thread_entrypoint() based on
2262 * the value of flavor.
2263 */
2264 ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
2265 if (ret != KERN_SUCCESS) {
2266 return LOAD_FAILURE;
2267 }
2268 ts += size; /* ts is a (uint32_t *) */
2269 }
2270 return LOAD_SUCCESS;
2271 }
2272
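/*
 * Scratch area for get_macho_vnode(): a nameidata for the path lookup
 * plus a 512-byte buffer large enough for either flavor of header at
 * the start of a dynamic linker candidate.
 */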
2273 struct macho_data {
2274 struct nameidata __nid;
2275 union macho_vnode_header {
2276 struct mach_header mach_header;
2277 struct fat_header fat_header;
2278 char __pad[512];
2279 } __header;
2280 };
2281
2282 #define DEFAULT_DYLD_PATH "/usr/lib/dyld"
2283
2284 #if (DEVELOPMENT || DEBUG)
2285 extern char dyld_alt_path[];
2286 extern int use_alt_dyld;
2287 #endif
2288
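/*
 * load_dylinker() loads the dynamic linker named by an
 * LC_LOAD_DYLINKER command and folds its entry point, thread state
 * and all-image-info range into the main binary's load result. The
 * command embeds its pathname as an lc_str offset:
 *
 *	struct dylinker_command {
 *		uint32_t     cmd;       LC_LOAD_DYLINKER
 *		uint32_t     cmdsize;   includes the pathname string
 *		union lc_str name;      offset of the path from the
 *		                        start of the command
 *	};
 *
 * On release kernels only DEFAULT_DYLD_PATH is accepted.
 */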
2289 static load_return_t
2290 load_dylinker(
2291 struct dylinker_command *lcp,
2292 integer_t archbits,
2293 vm_map_t map,
2294 thread_t thread,
2295 int depth,
2296 int64_t slide,
2297 load_result_t *result,
2298 struct image_params *imgp
2299 )
2300 {
2301 const char *name;
2302 struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
2303 struct mach_header *header;
2304 off_t file_offset = 0; /* set by get_macho_vnode() */
2305 off_t macho_size = 0; /* set by get_macho_vnode() */
2306 load_result_t *myresult;
2307 kern_return_t ret;
2308 struct macho_data *macho_data;
2309 struct {
2310 struct mach_header __header;
2311 load_result_t __myresult;
2312 struct macho_data __macho_data;
2313 } *dyld_data;
2314
2315 if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) {
2316 return LOAD_BADMACHO;
2317 }
2318
2319 name = (const char *)lcp + lcp->name.offset;
2320
2321 /* Check for a proper null terminated string. */
2322 size_t maxsz = lcp->cmdsize - lcp->name.offset;
2323 size_t namelen = strnlen(name, maxsz);
2324 if (namelen >= maxsz) {
2325 return LOAD_BADMACHO;
2326 }
2327
2328 #if (DEVELOPMENT || DEBUG)
2329
2330 /*
2331 * rdar://23680808
2332 * If an alternate dyld has been specified via boot args, check
2333 * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
2334 * executable and redirect the kernel to load that linker.
2335 */
2336
2337 if (use_alt_dyld) {
2338 int policy_error;
2339 uint32_t policy_flags = 0;
2340 int32_t policy_gencount = 0;
2341
2342 policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
2343 if (policy_error == 0) {
2344 if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
2345 name = dyld_alt_path;
2346 }
2347 }
2348 }
2349 #endif
2350
2351 #if !(DEVELOPMENT || DEBUG)
2352 if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
2353 return LOAD_BADMACHO;
2354 }
2355 #endif
2356
2357 /* Allocate wad-of-data from heap to reduce excessively deep stacks */
2358
2359 MALLOC(dyld_data, void *, sizeof(*dyld_data), M_TEMP, M_WAITOK);
2360 header = &dyld_data->__header;
2361 myresult = &dyld_data->__myresult;
2362 macho_data = &dyld_data->__macho_data;
2363
2364 ret = get_macho_vnode(name, archbits, header,
2365 &file_offset, &macho_size, macho_data, &vp);
2366 if (ret) {
2367 goto novp_out;
2368 }
2369
2370 *myresult = load_result_null;
2371 myresult->is_64bit_addr = result->is_64bit_addr;
2372 myresult->is_64bit_data = result->is_64bit_data;
2373
2374 ret = parse_machfile(vp, map, thread, header, file_offset,
2375 macho_size, depth, slide, 0, myresult, result, imgp);
2376
2377 if (ret == LOAD_SUCCESS) {
2378 if (result->threadstate) {
2379 /* don't use the app's threadstate if we have a dyld */
2380 kfree(result->threadstate, result->threadstate_sz);
2381 }
2382 result->threadstate = myresult->threadstate;
2383 result->threadstate_sz = myresult->threadstate_sz;
2384
2385 result->dynlinker = TRUE;
2386 result->entry_point = myresult->entry_point;
2387 result->validentry = myresult->validentry;
2388 result->all_image_info_addr = myresult->all_image_info_addr;
2389 result->all_image_info_size = myresult->all_image_info_size;
2390 if (myresult->platform_binary) {
2391 result->csflags |= CS_DYLD_PLATFORM;
2392 }
2393 }
2394
2395 struct vnode_attr va;
2396 VATTR_INIT(&va);
2397 VATTR_WANTED(&va, va_fsid64);
2398 VATTR_WANTED(&va, va_fsid);
2399 VATTR_WANTED(&va, va_fileid);
2400 int error = vnode_getattr(vp, &va, imgp->ip_vfs_context);
2401 if (error == 0) {
2402 imgp->ip_dyld_fsid = vnode_get_va_fsid(&va);
2403 imgp->ip_dyld_fsobjid = va.va_fileid;
2404 }
2405
2406 vnode_put(vp);
2407 novp_out:
2408 FREE(dyld_data, M_TEMP);
2409 return ret;
2410 }
2411
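/*
 * load_code_signature() handles LC_CODE_SIGNATURE: if the UBC already
 * holds a blob for this vnode, cpu type and offset, it is reused or
 * revalidated; otherwise the signature is read from the file and
 * handed to ubc_cs_blob_add(), which consumes the buffer on success.
 */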
2412 static load_return_t
2413 load_code_signature(
2414 struct linkedit_data_command *lcp,
2415 struct vnode *vp,
2416 off_t macho_offset,
2417 off_t macho_size,
2418 cpu_type_t cputype,
2419 load_result_t *result,
2420 struct image_params *imgp)
2421 {
2422 int ret;
2423 kern_return_t kr;
2424 vm_offset_t addr;
2425 int resid;
2426 struct cs_blob *blob;
2427 int error;
2428 vm_size_t blob_size;
2429
2430 addr = 0;
2431 blob = NULL;
2432
2433 if (lcp->cmdsize != sizeof(struct linkedit_data_command) ||
2434 lcp->dataoff + lcp->datasize > macho_size) {
2435 ret = LOAD_BADMACHO;
2436 goto out;
2437 }
2438
2439 blob = ubc_cs_blob_get(vp, cputype, macho_offset);
2440
2441 if (blob != NULL) {
2442 /* we already have a blob for this vnode and cputype */
2443 if (blob->csb_cpu_type != cputype ||
2444 blob->csb_base_offset != macho_offset) {
2445 /* the blob has changed for this vnode: fail ! */
2446 ret = LOAD_BADMACHO;
2447 goto out;
2448 }
2449
2450 /* It matches the blob we want here, let's verify the version */
2451 if (ubc_cs_generation_check(vp) == 0) {
2452 /* No need to revalidate, we're good! */
2453 ret = LOAD_SUCCESS;
2454 goto out;
2455 }
2456
2457 /* That blob may be stale, let's revalidate. */
2458 error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
2459 if (error == 0) {
2460 /* Revalidation succeeded, we're good! */
2461 ret = LOAD_SUCCESS;
2462 goto out;
2463 }
2464
2465 if (error != EAGAIN) {
2466 printf("load_code_signature: revalidation failed: %d\n", error);
2467 ret = LOAD_FAILURE;
2468 goto out;
2469 }
2470
2471 assert(error == EAGAIN);
2472
2473 /*
2474 * Revalidation was not possible for this blob. We just continue as if there was no blob,
2475 * rereading the signature, and ubc_cs_blob_add will do the right thing.
2476 */
2477 blob = NULL;
2478 }
2479
2480 blob_size = lcp->datasize;
2481 kr = ubc_cs_blob_allocate(&addr, &blob_size);
2482 if (kr != KERN_SUCCESS) {
2483 ret = LOAD_NOSPACE;
2484 goto out;
2485 }
2486
2487 resid = 0;
2488 error = vn_rdwr(UIO_READ,
2489 vp,
2490 (caddr_t) addr,
2491 lcp->datasize,
2492 macho_offset + lcp->dataoff,
2493 UIO_SYSSPACE,
2494 0,
2495 kauth_cred_get(),
2496 &resid,
2497 current_proc());
2498 if (error || resid != 0) {
2499 ret = LOAD_IOERROR;
2500 goto out;
2501 }
2502
2503 if (ubc_cs_blob_add(vp,
2504 cputype,
2505 macho_offset,
2506 &addr,
2507 lcp->datasize,
2508 imgp,
2509 0,
2510 &blob)) {
2511 if (addr) {
2512 ubc_cs_blob_deallocate(addr, blob_size);
2513 }
2514 ret = LOAD_FAILURE;
2515 goto out;
2516 } else {
2517 /* ubc_cs_blob_add() has consumed "addr" */
2518 addr = 0;
2519 }
2520
2521 #if CHECK_CS_VALIDATION_BITMAP
2522 ubc_cs_validation_bitmap_allocate(vp);
2523 #endif
2524
2525 ret = LOAD_SUCCESS;
2526 out:
2527 if (ret == LOAD_SUCCESS) {
2528 if (blob == NULL) {
2529 panic("success, but no blob!");
2530 }
2531
2532 result->csflags |= blob->csb_flags;
2533 result->platform_binary = blob->csb_platform_binary;
2534 result->cs_end_offset = blob->csb_end_offset;
2535 }
2536 if (addr != 0) {
2537 ubc_cs_blob_deallocate(addr, blob_size);
2538 addr = 0;
2539 }
2540
2541 return ret;
2542 }
2543
2544
2545 #if CONFIG_CODE_DECRYPTION
2546
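/*
 * set_code_unprotect() handles an LC_ENCRYPTION_INFO-style command:
 * it creates a decrypter for the named scheme, rescans the load
 * commands for the segment covering the [cryptoff, cryptoff +
 * cryptsize) file range, and remaps that range through
 * vm_map_apple_protected() so its pages are decrypted as they are
 * brought in.
 */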
2547 static load_return_t
2548 set_code_unprotect(
2549 struct encryption_info_command *eip,
2550 caddr_t addr,
2551 vm_map_t map,
2552 int64_t slide,
2553 struct vnode *vp,
2554 off_t macho_offset,
2555 cpu_type_t cputype,
2556 cpu_subtype_t cpusubtype)
2557 {
2558 int error, len;
2559 pager_crypt_info_t crypt_info;
2560 const char * cryptname = 0;
2561 char *vpath;
2562
2563 size_t offset;
2564 struct segment_command_64 *seg64;
2565 struct segment_command *seg32;
2566 vm_map_offset_t map_offset, map_size;
2567 vm_object_offset_t crypto_backing_offset;
2568 kern_return_t kr;
2569
2570 if (eip->cmdsize < sizeof(*eip)) {
2571 return LOAD_BADMACHO;
2572 }
2573
2574 switch (eip->cryptid) {
2575 case 0:
2576 /* not encrypted, just an empty load command */
2577 return LOAD_SUCCESS;
2578 case 1:
2579 cryptname = "com.apple.unfree";
2580 break;
2581 case 0x10:
2582 /* a cryptid that you can manually put into your binary
2583 * if you want the null decrypter */
2584 cryptname = "com.apple.null";
2585 break;
2586 default:
2587 return LOAD_BADMACHO;
2588 }
2589
2590 if (map == VM_MAP_NULL) {
2591 return LOAD_SUCCESS;
2592 }
2593 if (NULL == text_crypter_create) {
2594 return LOAD_FAILURE;
2595 }
2596
2597 MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
2598 if (vpath == NULL) {
2599 return LOAD_FAILURE;
2600 }
2601
2602 len = MAXPATHLEN;
2603 error = vn_getpath(vp, vpath, &len);
2604 if (error) {
2605 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
2606 return LOAD_FAILURE;
2607 }
2608
2609 /* set up decrypter first */
2610 crypt_file_data_t crypt_data = {
2611 .filename = vpath,
2612 .cputype = cputype,
2613 .cpusubtype = cpusubtype
2614 };
2615 kr = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
2616 #if VM_MAP_DEBUG_APPLE_PROTECT
2617 if (vm_map_debug_apple_protect) {
2618 struct proc *p;
2619 p = current_proc();
2620 printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
2621 p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr);
2622 }
2623 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
2624 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
2625
2626 if (kr) {
2627 printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
2628 cryptname, kr);
2629 if (kr == kIOReturnNotPrivileged) {
2630 /* text encryption returned decryption failure */
2631 return LOAD_DECRYPTFAIL;
2632 } else {
2633 return LOAD_RESOURCE;
2634 }
2635 }
2636
2637 /* this is terrible, but we have to rescan the load commands to find the
2638 * virtual address of this encrypted stuff. This code is gonna look like
2639 * the dyld source one day... */
2640 struct mach_header *header = (struct mach_header *)addr;
2641 size_t mach_header_sz = sizeof(struct mach_header);
2642 if (header->magic == MH_MAGIC_64 ||
2643 header->magic == MH_CIGAM_64) {
2644 mach_header_sz = sizeof(struct mach_header_64);
2645 }
2646 offset = mach_header_sz;
2647 uint32_t ncmds = header->ncmds;
2648 while (ncmds--) {
2649 /*
2650 * Get a pointer to the command.
2651 */
2652 struct load_command *lcp = (struct load_command *)(addr + offset);
2653 offset += lcp->cmdsize;
2654
2655 switch (lcp->cmd) {
2656 case LC_SEGMENT_64:
2657 seg64 = (struct segment_command_64 *)lcp;
2658 if ((seg64->fileoff <= eip->cryptoff) &&
2659 (seg64->fileoff + seg64->filesize >=
2660 eip->cryptoff + eip->cryptsize)) {
2661 map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
2662 map_size = eip->cryptsize;
2663 crypto_backing_offset = macho_offset + eip->cryptoff;
2664 goto remap_now;
2665 }
break; /* not this segment: keep scanning the load commands */
2666 case LC_SEGMENT:
2667 seg32 = (struct segment_command *)lcp;
2668 if ((seg32->fileoff <= eip->cryptoff) &&
2669 (seg32->fileoff + seg32->filesize >=
2670 eip->cryptoff + eip->cryptsize)) {
2671 map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
2672 map_size = eip->cryptsize;
2673 crypto_backing_offset = macho_offset + eip->cryptoff;
2674 goto remap_now;
2675 }
2676 }
2677 }
2678
2679 /* if we get here, we did not find a matching segment */
2680 return LOAD_BADMACHO;
2681
2682 remap_now:
2683 /* now remap using the decrypter */
2684 MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
2685 (uint64_t) map_offset,
2686 (uint64_t) (map_offset + map_size)));
2687 kr = vm_map_apple_protected(map,
2688 map_offset,
2689 map_offset + map_size,
2690 crypto_backing_offset,
2691 &crypt_info);
2692 if (kr) {
2693 printf("set_code_unprotect(): mapping failed with %x\n", kr);
2694 return LOAD_PROTECT;
2695 }
2696
2697 return LOAD_SUCCESS;
2698 }
2699
2700 #endif
2701
2702 /*
2703 * This routine exists to support load_dylinker().
2704 *
2705 * This routine has its own, separate, understanding of the FAT file format,
2706 * which is terrifically unfortunate.
2707 */
2708 static
2709 load_return_t
2710 get_macho_vnode(
2711 const char *path,
2712 integer_t archbits,
2713 struct mach_header *mach_header,
2714 off_t *file_offset,
2715 off_t *macho_size,
2716 struct macho_data *data,
2717 struct vnode **vpp
2718 )
2719 {
2720 struct vnode *vp;
2721 vfs_context_t ctx = vfs_context_current();
2722 proc_t p = vfs_context_proc(ctx);
2723 kauth_cred_t kerncred;
2724 struct nameidata *ndp = &data->__nid;
2725 boolean_t is_fat;
2726 struct fat_arch fat_arch;
2727 int error;
2728 int resid;
2729 union macho_vnode_header *header = &data->__header;
2730 off_t fsize = (off_t)0;
2731
2732 /*
2733 * Capture the kernel credential for use in the actual read of the
2734 * file: the user doing the execution may have execute rights but
2735 * not read rights, yet to exec something we have to either map it
2736 * or read it into the new process address space, which requires
2737 * read rights. This deals with the lack of common credential
2738 * serialization code, which would treat NOCRED as "serialize 'root'".
2739 */
2740 kerncred = vfs_context_ucred(vfs_context_kernel());
2741
2742 /* init the namei data to point at the user's program file */
2743 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
2744
2745 if ((error = namei(ndp)) != 0) {
2746 if (error == ENOENT) {
2747 error = LOAD_ENOENT;
2748 } else {
2749 error = LOAD_FAILURE;
2750 }
2751 return error;
2752 }
2753 nameidone(ndp);
2754 vp = ndp->ni_vp;
2755
2756 /* check for regular file */
2757 if (vp->v_type != VREG) {
2758 error = LOAD_PROTECT;
2759 goto bad1;
2760 }
2761
2762 /* get size */
2763 if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
2764 error = LOAD_FAILURE;
2765 goto bad1;
2766 }
2767
2768 /* Check mount point */
2769 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
2770 error = LOAD_PROTECT;
2771 goto bad1;
2772 }
2773
2774 /* check access */
2775 if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
2776 error = LOAD_PROTECT;
2777 goto bad1;
2778 }
2779
2780 /* try to open it */
2781 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
2782 error = LOAD_PROTECT;
2783 goto bad1;
2784 }
2785
2786 if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0,
2787 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
2788 error = LOAD_IOERROR;
2789 goto bad2;
2790 }
2791
2792 if (resid) {
2793 error = LOAD_BADMACHO;
2794 goto bad2;
2795 }
2796
2797 if (header->mach_header.magic == MH_MAGIC ||
2798 header->mach_header.magic == MH_MAGIC_64) {
2799 is_fat = FALSE;
2800 } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
2801 is_fat = TRUE;
2802 } else {
2803 error = LOAD_BADMACHO;
2804 goto bad2;
2805 }
2806
2807 if (is_fat) {
2808 error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
2809 sizeof(*header));
2810 if (error != LOAD_SUCCESS) {
2811 goto bad2;
2812 }
2813
2814 /* Look up our architecture in the fat file. */
2815 error = fatfile_getarch_with_bits(archbits,
2816 (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch);
2817 if (error != LOAD_SUCCESS) {
2818 goto bad2;
2819 }
2820
2821 /* Read the Mach-O header out of it */
2822 error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
2823 sizeof(header->mach_header), fat_arch.offset,
2824 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
2825 if (error) {
2826 error = LOAD_IOERROR;
2827 goto bad2;
2828 }
2829
2830 if (resid) {
2831 error = LOAD_BADMACHO;
2832 goto bad2;
2833 }
2834
2835 /* Is this really a Mach-O? */
2836 if (header->mach_header.magic != MH_MAGIC &&
2837 header->mach_header.magic != MH_MAGIC_64) {
2838 error = LOAD_BADMACHO;
2839 goto bad2;
2840 }
2841
2842 *file_offset = fat_arch.offset;
2843 *macho_size = fat_arch.size;
2844 } else {
2845 /*
2846 * Force get_macho_vnode() to fail if the architecture bits
2847 * do not match the expected architecture bits. This in
2848 * turn causes load_dylinker() to fail for the same reason,
2849 * so it ensures the dynamic linker and the binary are in
2850 * lock-step. This is potentially bad, if we ever add to
2851 * the CPU_ARCH_* bits any bits that are desirable but not
2852 * required, since the dynamic linker might work, but we will
2853 * refuse to load it because of this check.
2854 */
2855 if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
2856 error = LOAD_BADARCH;
2857 goto bad2;
2858 }
2859
2860 *file_offset = 0;
2861 *macho_size = fsize;
2862 }
2863
2864 *mach_header = header->mach_header;
2865 *vpp = vp;
2866
2867 ubc_setsize(vp, fsize);
2868 return error;
2869
2870 bad2:
2871 (void) VNOP_CLOSE(vp, FREAD, ctx);
2872 bad1:
2873 vnode_put(vp);
2874 return error;
2875 }