1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (C) 1988, 1989, NeXT, Inc.
30 *
31 * File: kern/mach_loader.c
32 * Author: Avadis Tevanian, Jr.
33 *
34 * Mach object file loader (kernel version, for now).
35 *
36 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
37 * Started.
38 */
39
40 #include <sys/param.h>
41 #include <sys/vnode_internal.h>
42 #include <sys/uio.h>
43 #include <sys/namei.h>
44 #include <sys/proc_internal.h>
45 #include <sys/kauth.h>
46 #include <sys/stat.h>
47 #include <sys/malloc.h>
48 #include <sys/mount_internal.h>
49 #include <sys/fcntl.h>
50 #include <sys/ubc_internal.h>
51 #include <sys/imgact.h>
52 #include <sys/codesign.h>
53
54 #include <mach/mach_types.h>
55 #include <mach/vm_map.h> /* vm_allocate() */
56 #include <mach/mach_vm.h> /* mach_vm_allocate() */
57 #include <mach/vm_statistics.h>
58 #include <mach/task.h>
59 #include <mach/thread_act.h>
60
61 #include <machine/vmparam.h>
62 #include <machine/exec.h>
63 #include <machine/pal_routines.h>
64
65 #include <kern/kern_types.h>
66 #include <kern/cpu_number.h>
67 #include <kern/mach_loader.h>
68 #include <kern/mach_fat.h>
69 #include <kern/kalloc.h>
70 #include <kern/task.h>
71 #include <kern/thread.h>
72 #include <kern/page_decrypt.h>
73
74 #include <mach-o/fat.h>
75 #include <mach-o/loader.h>
76
77 #include <vm/pmap.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_pager.h>
81 #include <vm/vnode_pager.h>
82 #include <vm/vm_protos.h>
83 #include <IOKit/IOReturn.h> /* for kIOReturnNotPrivileged */
84
85 /*
86 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
87 * when KERNEL is defined.
88 */
89 extern pmap_t pmap_create(ledger_t ledger, vm_map_size_t size,
90 boolean_t is_64bit);
91
92 extern kern_return_t machine_thread_neon_state_initialize(thread_t thread);
93
94 /* XXX should have prototypes in a shared header file */
95 extern int get_map_nentries(vm_map_t);
96
97 extern kern_return_t memory_object_signed(memory_object_control_t control,
98 boolean_t is_signed);
99
100 /* An empty load_result_t */
101 static load_result_t load_result_null = {
102 .mach_header = MACH_VM_MIN_ADDRESS,
103 .entry_point = MACH_VM_MIN_ADDRESS,
104 .user_stack = MACH_VM_MIN_ADDRESS,
105 .user_stack_size = 0,
106 .all_image_info_addr = MACH_VM_MIN_ADDRESS,
107 .all_image_info_size = 0,
108 .thread_count = 0,
109 .unixproc = 0,
110 .dynlinker = 0,
111 .needs_dynlinker = 0,
112 .prog_allocated_stack = 0,
113 .prog_stack_size = 0,
114 .validentry = 0,
115 .using_lcmain = 0,
116 .csflags = 0,
117 .has_pagezero = 0,
118 .uuid = { 0 },
119 .min_vm_addr = MACH_VM_MAX_ADDRESS,
120 .max_vm_addr = MACH_VM_MIN_ADDRESS,
121 .cs_end_offset = 0
122 };
123
124 /*
125 * Prototypes of static functions.
126 */
127 static load_return_t
128 parse_machfile(
129 struct vnode *vp,
130 vm_map_t map,
131 thread_t thread,
132 struct mach_header *header,
133 off_t file_offset,
134 off_t macho_size,
135 int depth,
136 int64_t slide,
137 int64_t dyld_slide,
138 load_result_t *result
139 );
140
141 static load_return_t
142 load_segment(
143 struct load_command *lcp,
144 uint32_t filetype,
145 void *control,
146 off_t pager_offset,
147 off_t macho_size,
148 struct vnode *vp,
149 vm_map_t map,
150 int64_t slide,
151 load_result_t *result
152 );
153
154 static load_return_t
155 load_uuid(
156 struct uuid_command *uulp,
157 char *command_end,
158 load_result_t *result
159 );
160
161 static load_return_t
162 load_code_signature(
163 struct linkedit_data_command *lcp,
164 struct vnode *vp,
165 off_t macho_offset,
166 off_t macho_size,
167 cpu_type_t cputype,
168 load_result_t *result);
169
170 #if CONFIG_CODE_DECRYPTION
171 static load_return_t
172 set_code_unprotect(
173 struct encryption_info_command *lcp,
174 caddr_t addr,
175 vm_map_t map,
176 int64_t slide,
177 struct vnode *vp,
178 off_t macho_offset,
179 cpu_type_t cputype,
180 cpu_subtype_t cpusubtype);
181 #endif
182
183 static
184 load_return_t
185 load_main(
186 struct entry_point_command *epc,
187 thread_t thread,
188 int64_t slide,
189 load_result_t *result
190 );
191
192 static load_return_t
193 load_unixthread(
194 struct thread_command *tcp,
195 thread_t thread,
196 int64_t slide,
197 load_result_t *result
198 );
199
200 static load_return_t
201 load_threadstate(
202 thread_t thread,
203 uint32_t *ts,
204 uint32_t total_size
205 );
206
207 static load_return_t
208 load_threadstack(
209 thread_t thread,
210 uint32_t *ts,
211 uint32_t total_size,
212 mach_vm_offset_t *user_stack,
213 int *customstack
214 );
215
216 static load_return_t
217 load_threadentry(
218 thread_t thread,
219 uint32_t *ts,
220 uint32_t total_size,
221 mach_vm_offset_t *entry_point
222 );
223
224 static load_return_t
225 load_dylinker(
226 struct dylinker_command *lcp,
227 integer_t archbits,
228 vm_map_t map,
229 thread_t thread,
230 int depth,
231 int64_t slide,
232 load_result_t *result
233 );
234
235 struct macho_data;
236
237 static load_return_t
238 get_macho_vnode(
239 char *path,
240 integer_t archbits,
241 struct mach_header *mach_header,
242 off_t *file_offset,
243 off_t *macho_size,
244 struct macho_data *macho_data,
245 struct vnode **vpp
246 );
247
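/*
 * widen_segment_command:
 * Copy the fields of a 32-bit LC_SEGMENT command into a
 * segment_command_64, so the rest of the loader only has to deal
 * with the 64-bit representation of a segment.
 */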
248 static inline void
249 widen_segment_command(const struct segment_command *scp32,
250 struct segment_command_64 *scp)
251 {
252 scp->cmd = scp32->cmd;
253 scp->cmdsize = scp32->cmdsize;
254 bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
255 scp->vmaddr = scp32->vmaddr;
256 scp->vmsize = scp32->vmsize;
257 scp->fileoff = scp32->fileoff;
258 scp->filesize = scp32->filesize;
259 scp->maxprot = scp32->maxprot;
260 scp->initprot = scp32->initprot;
261 scp->nsects = scp32->nsects;
262 scp->flags = scp32->flags;
263 }
264
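/*
 * note_all_image_info_section:
 * If this is the __DATA segment, look for a section named
 * "__all_image_info" and record its slid address and size in the
 * load result (all_image_info_addr / all_image_info_size).
 */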
265 static void
266 note_all_image_info_section(const struct segment_command_64 *scp,
267 boolean_t is64, size_t section_size, const void *sections,
268 int64_t slide, load_result_t *result)
269 {
270 const union {
271 struct section s32;
272 struct section_64 s64;
273 } *sectionp;
274 unsigned int i;
275
276 if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
277 return;
278 for (i = 0; i < scp->nsects; ++i) {
279 sectionp = (const void *)
280 ((const char *)sections + section_size * i);
281 if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
282 sizeof(sectionp->s64.sectname))) {
283 result->all_image_info_addr =
284 is64 ? sectionp->s64.addr : sectionp->s32.addr;
285 result->all_image_info_addr += slide;
286 result->all_image_info_size =
287 is64 ? sectionp->s64.size : sectionp->s32.size;
288 return;
289 }
290 }
291 }
292
293
294 load_return_t
295 load_machfile(
296 struct image_params *imgp,
297 struct mach_header *header,
298 thread_t thread,
299 vm_map_t new_map,
300 load_result_t *result
301 )
302 {
303 struct vnode *vp = imgp->ip_vp;
304 off_t file_offset = imgp->ip_arch_offset;
305 off_t macho_size = imgp->ip_arch_size;
306 off_t file_size = imgp->ip_vattr->va_data_size;
307
308 pmap_t pmap = 0; /* protected by create_map */
309 vm_map_t map;
310 vm_map_t old_map;
311 task_t old_task = TASK_NULL; /* protected by create_map */
312 load_result_t myresult;
313 load_return_t lret;
314 boolean_t create_map = FALSE;
315 boolean_t enforce_hard_pagezero = TRUE;
316 int spawn = (imgp->ip_flags & IMGPF_SPAWN);
317 task_t task = current_task();
318 proc_t p = current_proc();
319 mach_vm_offset_t aslr_offset = 0;
320 mach_vm_offset_t dyld_aslr_offset = 0;
321 kern_return_t kret;
322
323 if (macho_size > file_size) {
324 return(LOAD_BADMACHO);
325 }
326
327 if (new_map == VM_MAP_NULL) {
328 create_map = TRUE;
329 old_task = current_task();
330 }
331
332 /*
333 * If we are spawning, we have created backing objects for the process
334 * already, including a non-lazily created task map. So we
335 * are going to switch out the task map with one appropriate for the
336 * bitness of the image being loaded.
337 */
338 if (spawn) {
339 create_map = TRUE;
340 old_task = get_threadtask(thread);
341 }
342
343 if (create_map) {
344 task_t ledger_task;
345 if (imgp->ip_new_thread) {
346 ledger_task = get_threadtask(imgp->ip_new_thread);
347 } else {
348 ledger_task = task;
349 }
350 pmap = pmap_create(get_task_ledger(ledger_task),
351 (vm_map_size_t) 0,
352 ((imgp->ip_flags & IMGPF_IS_64BIT) != 0));
353 pal_switch_pmap(thread, pmap, imgp->ip_flags & IMGPF_IS_64BIT);
354 map = vm_map_create(pmap,
355 0,
356 vm_compute_max_offset(((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT)),
357 TRUE);
358 } else
359 map = new_map;
360
361 #if (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
362 /* enforce 16KB alignment for watch targets with new ABI */
363 vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
364 #endif /* (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS) */
365
366 #ifndef CONFIG_ENFORCE_SIGNED_CODE
367 /* This turns off faulting for executable pages, which makes it
368 * possible to circumvent Code Signing Enforcement. The per-process
369 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
370 * global flag.
371 */
372 if ( !cs_enforcement(NULL) && (header->flags & MH_ALLOW_STACK_EXECUTION) )
373 vm_map_disable_NX(map);
374 #endif
375
376 /* Forcibly disallow execution from data pages even if the arch
377 * normally permits it. */
378 if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
379 vm_map_disallow_data_exec(map);
380
381 /*
382 * Compute a random offset for ASLR, and an independent random offset for dyld.
383 */
384 if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
385 uint64_t max_slide_pages;
386
387 max_slide_pages = vm_map_get_max_aslr_slide_pages(map);
388
389 aslr_offset = random();
390 aslr_offset %= max_slide_pages;
391 aslr_offset <<= vm_map_page_shift(map);
392
393 dyld_aslr_offset = random();
394 dyld_aslr_offset %= max_slide_pages;
395 dyld_aslr_offset <<= vm_map_page_shift(map);
396 }
397
398 if (!result)
399 result = &myresult;
400
401 *result = load_result_null;
402
403 lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
404 0, (int64_t)aslr_offset, (int64_t)dyld_aslr_offset, result);
405
406 if (lret != LOAD_SUCCESS) {
407 if (create_map) {
408 vm_map_deallocate(map); /* will lose pmap reference too */
409 }
410 return(lret);
411 }
412
413 #if __x86_64__
414 /*
415 * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
416 */
417 if ((imgp->ip_flags & IMGPF_IS_64BIT) == 0) {
418 enforce_hard_pagezero = FALSE;
419 }
420 #endif
421 /*
422 * Check to see if the page zero is enforced by the map->min_offset.
423 */
424 if (enforce_hard_pagezero &&
425 (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
426 {
427 if (create_map) {
428 vm_map_deallocate(map); /* will lose pmap reference too */
429 }
430 return (LOAD_BADMACHO);
431 }
432 }
433
434 /*
435 * Commit to new map.
436 *
437 * Swap the new map for the old, which consumes our new map
438 * reference but leaves us responsible for the old_map reference.
439 * That lets us get off the pmap associated with it, and
440 * then we can release it.
441 */
442
443 if (create_map) {
444 /*
445 * If this is an exec, then we are going to destroy the old
446 * task, and it's correct to halt it; if it's spawn, the
447 * task is not yet running, and halting it makes no sense.
448 */
449 if (!spawn) {
450 /*
451 * Mark the task as halting and start the other
452 * threads towards terminating themselves. Then
453 * make sure any threads waiting for a process
454 * transition get informed that we are committed to
455 * this transition, and then finally complete the
456 * task halting (wait for threads and then cleanup
457 * task resources).
458 *
459 * NOTE: task_start_halt() makes sure that no new
460 * threads are created in the task during the transition.
461 * We need to mark the workqueue as exiting before we
462 * wait for threads to terminate (at the end of which
463 * we no longer have a prohibition on thread creation).
464 *
465 * Finally, clean up any lingering workqueue data structures
466 * that may have been left behind by the workqueue threads
467 * as they exited (and then clean up the work queue itself).
468 */
469 kret = task_start_halt(task);
470 if (kret != KERN_SUCCESS) {
471 vm_map_deallocate(map); /* will lose pmap reference too */
472 return (LOAD_FAILURE);
473 }
474 proc_transcommit(p, 0);
475 workqueue_mark_exiting(p);
476 task_complete_halt(task);
477 workqueue_exit(p);
478 kqueue_dealloc(p->p_wqkqueue);
479 p->p_wqkqueue = NULL;
480 }
481 old_map = swap_task_map(old_task, thread, map, !spawn);
482 vm_map_deallocate(old_map);
483 }
484 return(LOAD_SUCCESS);
485 }
486
487 int macho_printf = 0;
488 #define MACHO_PRINTF(args) \
489 do { \
490 if (macho_printf) { \
491 printf args; \
492 } \
493 } while (0)
494
495 /*
496 * The file size of a mach-o file is limited to 32 bits; this is because
497 * it is the limit on the kalloc() of enough bytes for a mach_header and
498 * the contents of its sizeofcmds, which is currently constrained to 32
499 * bits in the file format itself. We read the commands section into
500 * the kernel buffer, and then parse it to process the mach-o file
501 * format load_command segment(s). We are only interested in a subset of
502 * the total set of possible commands. If "map"==VM_MAP_NULL or
503 * "thread"==THREAD_NULL, do not make permanent VM modifications,
504 * just preflight the parse.
505 */
506 static
507 load_return_t
508 parse_machfile(
509 struct vnode *vp,
510 vm_map_t map,
511 thread_t thread,
512 struct mach_header *header,
513 off_t file_offset,
514 off_t macho_size,
515 int depth,
516 int64_t aslr_offset,
517 int64_t dyld_aslr_offset,
518 load_result_t *result
519 )
520 {
521 uint32_t ncmds;
522 struct load_command *lcp;
523 struct dylinker_command *dlp = 0;
524 integer_t dlarchbits = 0;
525 void * control;
526 load_return_t ret = LOAD_SUCCESS;
527 caddr_t addr;
528 void * kl_addr;
529 vm_size_t size,kl_size;
530 size_t offset;
531 size_t oldoffset; /* for overflow check */
532 int pass;
533 proc_t p = current_proc(); /* XXXX */
534 int error;
535 int resid = 0;
536 size_t mach_header_sz = sizeof(struct mach_header);
537 boolean_t abi64;
538 boolean_t got_code_signatures = FALSE;
539 int64_t slide = 0;
540
541 if (header->magic == MH_MAGIC_64 ||
542 header->magic == MH_CIGAM_64) {
543 mach_header_sz = sizeof(struct mach_header_64);
544 }
545
546 /*
547 * Break infinite recursion
548 */
549 if (depth > 1) {
550 return(LOAD_FAILURE);
551 }
552
553 depth++;
554
555 /*
556 * Check to see if this is the right machine type.
557 */
558 if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
559 !grade_binary(header->cputype,
560 header->cpusubtype & ~CPU_SUBTYPE_MASK))
561 return(LOAD_BADARCH);
562
563 abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
564
565 switch (header->filetype) {
566
567 case MH_EXECUTE:
568 if (depth != 1) {
569 return (LOAD_FAILURE);
570 }
571
572 break;
573 case MH_DYLINKER:
574 if (depth != 2) {
575 return (LOAD_FAILURE);
576 }
577 break;
578
579 default:
580 return (LOAD_FAILURE);
581 }
582
583 /*
584 * Get the pager for the file.
585 */
586 control = ubc_getobject(vp, UBC_FLAGS_NONE);
587
588 /*
589 * Map the portion that must be accessible directly into
590 * the kernel's map.
591 */
592 if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
593 return(LOAD_BADMACHO);
594
595 /*
596 * Round size of Mach-O commands up to page boundary.
597 */
598 size = round_page(mach_header_sz + header->sizeofcmds);
599 if (size <= 0)
600 return(LOAD_BADMACHO);
601
602 /*
603 * Read the load commands into kernel memory.
604 */
605 addr = 0;
606 kl_size = size;
607 kl_addr = kalloc(size);
608 addr = (caddr_t)kl_addr;
609 if (addr == NULL)
610 return(LOAD_NOSPACE);
611
612 error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
613 UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
614 if (error) {
615 if (kl_addr)
616 kfree(kl_addr, kl_size);
617 return(LOAD_IOERROR);
618 }
619
620 if (resid) {
621 /* We must be able to read in as much as the mach_header indicated */
622 if (kl_addr)
623 kfree(kl_addr, kl_size);
624 return(LOAD_BADMACHO);
625 }
626
627 /*
628 * For PIE and dyld, slide everything by the ASLR offset.
629 */
630 if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
631 slide = aslr_offset;
632 }
633
634 /*
635 * Scan through the commands, processing each one as necessary.
636 * We parse in four passes through the headers:
637 * 0: determine if TEXT and DATA boundary can be page-aligned
638 * 1: thread state, uuid, code signature
639 * 2: segments
640 * 3: dyld, encryption, check entry point
641 */
642
643 for (pass = 0; pass <= 3; pass++) {
644
645 if (pass == 0) {
646 /* see if we need to adjust the slide to re-align... */
647 /* no re-alignment needed on X86_64 or ARM32 kernel */
648 continue;
649 } else if (pass == 1) {
650 }
651
652 /*
653 * Check that the entry point is contained in an executable segment.
654 */
655 if ((pass == 3) && (!result->using_lcmain && result->validentry == 0)) {
656 thread_state_initialize(thread);
657 ret = LOAD_FAILURE;
658 break;
659 }
660
661 /*
662 * Loop through each of the load_commands indicated by the
663 * Mach-O header; if an absurd value is provided, we just
664 * run off the end of the reserved section by incrementing
665 * the offset too far, so we are implicitly fail-safe.
666 */
667 offset = mach_header_sz;
668 ncmds = header->ncmds;
669
670 while (ncmds--) {
671 /*
672 * Get a pointer to the command.
673 */
674 lcp = (struct load_command *)(addr + offset);
675 oldoffset = offset;
676 offset += lcp->cmdsize;
677
678 /*
679 * Perform prevalidation of the struct load_command
680 * before we attempt to use its contents. Invalid
681 * values are ones which result in an overflow, or
682 * which cannot possibly be valid commands, or which
683 * straddle or exist past the reserved section at the
684 * start of the image.
685 */
686 if (oldoffset > offset ||
687 lcp->cmdsize < sizeof(struct load_command) ||
688 offset > header->sizeofcmds + mach_header_sz) {
689 ret = LOAD_BADMACHO;
690 break;
691 }
692
693 /*
694 * Act on struct load_command's for which kernel
695 * intervention is required.
696 */
697 switch(lcp->cmd) {
698 case LC_SEGMENT:
699 if (pass == 0) {
700 break;
701 }
702
703 if (pass != 2)
704 break;
705
706 if (abi64) {
707 /*
708 * Having an LC_SEGMENT command for the
709 * wrong ABI is invalid <rdar://problem/11021230>
710 */
711 ret = LOAD_BADMACHO;
712 break;
713 }
714
715 ret = load_segment(lcp,
716 header->filetype,
717 control,
718 file_offset,
719 macho_size,
720 vp,
721 map,
722 slide,
723 result);
724 break;
725 case LC_SEGMENT_64:
726 if (pass != 2)
727 break;
728
729 if (!abi64) {
730 /*
731 * Having an LC_SEGMENT_64 command for the
732 * wrong ABI is invalid <rdar://problem/11021230>
733 */
734 ret = LOAD_BADMACHO;
735 break;
736 }
737
738 ret = load_segment(lcp,
739 header->filetype,
740 control,
741 file_offset,
742 macho_size,
743 vp,
744 map,
745 slide,
746 result);
747 break;
748 case LC_UNIXTHREAD:
749 if (pass != 1)
750 break;
751 ret = load_unixthread(
752 (struct thread_command *) lcp,
753 thread,
754 slide,
755 result);
756 break;
757 case LC_MAIN:
758 if (pass != 1)
759 break;
760 if (depth != 1)
761 break;
762 ret = load_main(
763 (struct entry_point_command *) lcp,
764 thread,
765 slide,
766 result);
767 break;
768 case LC_LOAD_DYLINKER:
769 if (pass != 3)
770 break;
771 if ((depth == 1) && (dlp == 0)) {
772 dlp = (struct dylinker_command *)lcp;
773 dlarchbits = (header->cputype & CPU_ARCH_MASK);
774 } else {
775 ret = LOAD_FAILURE;
776 }
777 break;
778 case LC_UUID:
779 if (pass == 1 && depth == 1) {
780 ret = load_uuid((struct uuid_command *) lcp,
781 (char *)addr + mach_header_sz + header->sizeofcmds,
782 result);
783 }
784 break;
785 case LC_CODE_SIGNATURE:
786 /* CODE SIGNING */
787 if (pass != 1)
788 break;
789 /* pager -> uip ->
790 load signatures & store in uip
791 set VM object "signed_pages"
792 */
793 ret = load_code_signature(
794 (struct linkedit_data_command *) lcp,
795 vp,
796 file_offset,
797 macho_size,
798 header->cputype,
799 result);
800 if (ret != LOAD_SUCCESS) {
801 printf("proc %d: load code signature error %d "
802 "for file \"%s\"\n",
803 p->p_pid, ret, vp->v_name);
804 /*
805 * Allow injections to be ignored on devices w/o enforcement enabled
806 */
807 if (!cs_enforcement(NULL))
808 ret = LOAD_SUCCESS; /* ignore error */
809
810 } else {
811 got_code_signatures = TRUE;
812 }
813
814 if (got_code_signatures) {
815 unsigned tainted = CS_VALIDATE_TAINTED;
816 boolean_t valid = FALSE;
817 struct cs_blob *blobs;
818 vm_size_t off = 0;
819
820
821 if (cs_debug > 10)
822 printf("validating initial pages of %s\n", vp->v_name);
823 blobs = ubc_get_cs_blobs(vp);
824
825 while (off < size && ret == LOAD_SUCCESS) {
826 tainted = CS_VALIDATE_TAINTED;
827
828 valid = cs_validate_page(blobs,
829 NULL,
830 file_offset + off,
831 addr + off,
832 &tainted);
833 if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
834 if (cs_debug)
835 printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
836 vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
837 if (cs_enforcement(NULL) ||
838 (result->csflags & (CS_HARD|CS_KILL|CS_ENFORCEMENT))) {
839 ret = LOAD_FAILURE;
840 }
841 result->csflags &= ~CS_VALID;
842 }
843 off += PAGE_SIZE;
844 }
845 }
846
847 break;
848 #if CONFIG_CODE_DECRYPTION
849 case LC_ENCRYPTION_INFO:
850 case LC_ENCRYPTION_INFO_64:
851 if (pass != 3)
852 break;
853 ret = set_code_unprotect(
854 (struct encryption_info_command *) lcp,
855 addr, map, slide, vp, file_offset,
856 header->cputype, header->cpusubtype);
857 if (ret != LOAD_SUCCESS) {
858 printf("proc %d: set_code_unprotect() error %d "
859 "for file \"%s\"\n",
860 p->p_pid, ret, vp->v_name);
861 /*
862 * Don't let the app run if it's
863 * encrypted but we failed to set up the
864 * decrypter. If the keys are missing it will
865 * return LOAD_DECRYPTFAIL.
866 */
867 if (ret == LOAD_DECRYPTFAIL) {
868 /* failed to load due to missing FP keys */
869 proc_lock(p);
870 p->p_lflag |= P_LTERM_DECRYPTFAIL;
871 proc_unlock(p);
872 }
873 psignal(p, SIGKILL);
874 }
875 break;
876 #endif
877 default:
878 /* Other commands are ignored by the kernel */
879 ret = LOAD_SUCCESS;
880 break;
881 }
882 if (ret != LOAD_SUCCESS)
883 break;
884 }
885 if (ret != LOAD_SUCCESS)
886 break;
887 }
888
889 if (ret == LOAD_SUCCESS) {
890 if (! got_code_signatures) {
891 if (cs_enforcement(NULL)) {
892 ret = LOAD_FAILURE;
893 } else {
894 /*
895 * No embedded signatures: look for a detached signature provided
896 * by taskgated. This is only done on OS X; on embedded platforms
897 * we expect everything to have embedded signatures.
898 */
899 struct cs_blob *blob;
900
901 blob = ubc_cs_blob_get(vp, -1, file_offset);
902 if (blob != NULL) {
903 unsigned int cs_flag_data = blob->csb_flags;
904 if(0 != ubc_cs_generation_check(vp)) {
905 if (0 != ubc_cs_blob_revalidate(vp, blob, 0)) {
906 /* clear out the flag data if revalidation fails */
907 cs_flag_data = 0;
908 result->csflags &= ~CS_VALID;
909 }
910 }
911 /* get flags to be applied to the process */
912 result->csflags |= cs_flag_data;
913 }
914 }
915 }
916
917 /* Make sure that if we need dyld, we found it */
918 if (result->needs_dynlinker && !dlp) {
919 ret = LOAD_FAILURE;
920 }
921
922 if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
923 /*
924 * load the dylinker, and slide it by the independent DYLD ASLR
925 * offset regardless of the PIE-ness of the main binary.
926 */
927 ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
928 dyld_aslr_offset, result);
929 }
930
931 if((ret == LOAD_SUCCESS) && (depth == 1)) {
932 if (result->thread_count == 0) {
933 ret = LOAD_FAILURE;
934 }
935 }
936 }
937
938 if (kl_addr )
939 kfree(kl_addr, kl_size);
940
941 return(ret);
942 }
943
944 #if CONFIG_CODE_DECRYPTION
945
946 #define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
947
948 static load_return_t
949 unprotect_dsmos_segment(
950 uint64_t file_off,
951 uint64_t file_size,
952 struct vnode *vp,
953 off_t macho_offset,
954 vm_map_t map,
955 vm_map_offset_t map_addr,
956 vm_map_size_t map_size)
957 {
958 kern_return_t kr;
959
960 /*
961 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
962 * this part of a Universal binary) are not protected...
963 * The rest needs to be "transformed".
964 */
965 if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
966 file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
967 /* it's all unprotected, nothing to do... */
968 kr = KERN_SUCCESS;
969 } else {
970 if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
971 /*
972 * We start mapping in the unprotected area.
973 * Skip the unprotected part...
974 */
975 vm_map_offset_t delta;
976
977 delta = APPLE_UNPROTECTED_HEADER_SIZE;
978 delta -= file_off;
979 map_addr += delta;
980 map_size -= delta;
981 }
982 /* ... transform the rest of the mapping. */
983 struct pager_crypt_info crypt_info;
984 crypt_info.page_decrypt = dsmos_page_transform;
985 crypt_info.crypt_ops = NULL;
986 crypt_info.crypt_end = NULL;
987 #pragma unused(vp, macho_offset)
988 crypt_info.crypt_ops = (void *)0x2e69cf40;
989 vm_map_offset_t crypto_backing_offset;
990 crypto_backing_offset = -1; /* i.e. use map entry's offset */
991 #if DEVELOPMENT || DEBUG
992 struct proc *p;
993 p = current_proc();
994 printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s)\n",
995 p->p_pid, p->p_comm, map,
996 (uint64_t) map_addr, (uint64_t) (map_addr + map_size),
997 __FUNCTION__, vp->v_name);
998 #endif /* DEVELOPMENT || DEBUG */
999 kr = vm_map_apple_protected(map,
1000 map_addr,
1001 map_addr + map_size,
1002 crypto_backing_offset,
1003 &crypt_info);
1004 }
1005
1006 if (kr != KERN_SUCCESS) {
1007 return LOAD_FAILURE;
1008 }
1009 return LOAD_SUCCESS;
1010 }
1011 #else /* CONFIG_CODE_DECRYPTION */
1012 static load_return_t
1013 unprotect_dsmos_segment(
1014 __unused uint64_t file_off,
1015 __unused uint64_t file_size,
1016 __unused struct vnode *vp,
1017 __unused off_t macho_offset,
1018 __unused vm_map_t map,
1019 __unused vm_map_offset_t map_addr,
1020 __unused vm_map_size_t map_size)
1021 {
1022 return LOAD_SUCCESS;
1023 }
1024 #endif /* CONFIG_CODE_DECRYPTION */
1025
1026
1027 /*
1028 * map_segment:
1029 * Maps a Mach-O segment, taking care of mis-alignment (wrt the system
1030 * page size) issues.
1031 *
1032 * The mapping might result in 1, 2 or 3 map entries:
1033 * 1. for the first page, which could overlap with the previous
1034 * mapping,
1035 * 2. for the center (if applicable),
1036 * 3. for the last page, which could overlap with the next mapping.
1037 *
1038 * For each of those map entries, we might have to interpose a
1039 * "fourk_pager" to deal with mis-alignment wrt the system page size,
1040 * either in the mapping address and/or size or the file offset and/or
1041 * size.
1042 * The "fourk_pager" itself would be mapped with proper alignment
1043 * wrt the system page size and would then be populated with the
1044 * information about the intended mapping, with a "4KB" granularity.
1045 */
1046 static kern_return_t
1047 map_segment(
1048 vm_map_t map,
1049 vm_map_offset_t vm_start,
1050 vm_map_offset_t vm_end,
1051 memory_object_control_t control,
1052 vm_map_offset_t file_start,
1053 vm_map_offset_t file_end,
1054 vm_prot_t initprot,
1055 vm_prot_t maxprot)
1056 {
1057 int extra_vm_flags, cur_extra_vm_flags;
1058 vm_map_offset_t cur_offset, cur_start, cur_end;
1059 kern_return_t ret;
1060 vm_map_offset_t effective_page_mask;
1061
1062 if (vm_end < vm_start ||
1063 file_end < file_start) {
1064 return LOAD_BADMACHO;
1065 }
1066 if (vm_end == vm_start ||
1067 file_end == file_start) {
1068 /* nothing to map... */
1069 return LOAD_SUCCESS;
1070 }
1071
1072 effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
1073
1074 extra_vm_flags = 0;
1075 if (vm_map_page_aligned(vm_start, effective_page_mask) &&
1076 vm_map_page_aligned(vm_end, effective_page_mask) &&
1077 vm_map_page_aligned(file_start, effective_page_mask) &&
1078 vm_map_page_aligned(file_end, effective_page_mask)) {
1079 /* all page-aligned and map-aligned: proceed */
1080 } else {
1081 panic("map_segment: unexpected mis-alignment "
1082 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
1083 (uint64_t) vm_start,
1084 (uint64_t) vm_end,
1085 (uint64_t) file_start,
1086 (uint64_t) file_end);
1087 }
1088
1089 cur_offset = 0;
1090 cur_start = vm_start;
1091 cur_end = vm_start;
1092 if (cur_end >= vm_start + (file_end - file_start)) {
1093 /* all mapped: done */
1094 goto done;
1095 }
1096 if (vm_map_round_page(cur_end, effective_page_mask) >=
1097 vm_map_trunc_page(vm_start + (file_end - file_start),
1098 effective_page_mask)) {
1099 /* no middle */
1100 } else {
1101 cur_start = cur_end;
1102 if ((vm_start & effective_page_mask) !=
1103 (file_start & effective_page_mask)) {
1104 /* one 4K pager for the middle */
1105 cur_extra_vm_flags = extra_vm_flags;
1106 } else {
1107 /* regular mapping for the middle */
1108 cur_extra_vm_flags = 0;
1109 }
1110 cur_end = vm_map_trunc_page(vm_start + (file_end -
1111 file_start),
1112 effective_page_mask);
1113 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1114 ret = vm_map_enter_mem_object_control(
1115 map,
1116 &cur_start,
1117 cur_end - cur_start,
1118 (mach_vm_offset_t)0,
1119 VM_FLAGS_FIXED | cur_extra_vm_flags,
1120 control,
1121 file_start + cur_offset,
1122 TRUE, /* copy */
1123 initprot, maxprot,
1124 VM_INHERIT_DEFAULT);
1125 } else {
1126 ret = vm_map_enter_mem_object(
1127 map,
1128 &cur_start,
1129 cur_end - cur_start,
1130 (mach_vm_offset_t)0,
1131 VM_FLAGS_FIXED | cur_extra_vm_flags,
1132 IPC_PORT_NULL,
1133 0, /* offset */
1134 TRUE, /* copy */
1135 initprot, maxprot,
1136 VM_INHERIT_DEFAULT);
1137 }
1138 if (ret != KERN_SUCCESS) {
1139 return (LOAD_NOSPACE);
1140 }
1141 cur_offset += cur_end - cur_start;
1142 }
1143 if (cur_end >= vm_start + (file_end - file_start)) {
1144 /* all mapped: done */
1145 goto done;
1146 }
1147 cur_start = cur_end;
1148 done:
1149 assert(cur_end >= vm_start + (file_end - file_start));
1150 return LOAD_SUCCESS;
1151 }
1152
1153 static
1154 load_return_t
1155 load_segment(
1156 struct load_command *lcp,
1157 uint32_t filetype,
1158 void * control,
1159 off_t pager_offset,
1160 off_t macho_size,
1161 struct vnode *vp,
1162 vm_map_t map,
1163 int64_t slide,
1164 load_result_t *result)
1165 {
1166 struct segment_command_64 segment_command, *scp;
1167 kern_return_t ret;
1168 vm_map_size_t delta_size;
1169 vm_prot_t initprot;
1170 vm_prot_t maxprot;
1171 size_t segment_command_size, total_section_size,
1172 single_section_size;
1173 vm_map_offset_t file_offset, file_size;
1174 vm_map_offset_t vm_offset, vm_size;
1175 vm_map_offset_t vm_start, vm_end, vm_end_aligned;
1176 vm_map_offset_t file_start, file_end;
1177 kern_return_t kr;
1178 boolean_t verbose;
1179 vm_map_size_t effective_page_size;
1180 vm_map_offset_t effective_page_mask;
1181
1182 effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
1183 effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
1184
1185 verbose = FALSE;
1186 if (LC_SEGMENT_64 == lcp->cmd) {
1187 segment_command_size = sizeof(struct segment_command_64);
1188 single_section_size = sizeof(struct section_64);
1189 } else {
1190 segment_command_size = sizeof(struct segment_command);
1191 single_section_size = sizeof(struct section);
1192 }
1193 if (lcp->cmdsize < segment_command_size)
1194 return (LOAD_BADMACHO);
1195 total_section_size = lcp->cmdsize - segment_command_size;
1196
1197 if (LC_SEGMENT_64 == lcp->cmd) {
1198 scp = (struct segment_command_64 *)lcp;
1199 } else {
1200 scp = &segment_command;
1201 widen_segment_command((struct segment_command *)lcp, scp);
1202 }
1203
1204 if (verbose) {
1205 MACHO_PRINTF(("+++ load_segment %s "
1206 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
1207 "prot %d/%d flags 0x%x\n",
1208 scp->segname,
1209 (uint64_t)(slide + scp->vmaddr),
1210 (uint64_t)(slide + scp->vmaddr + scp->vmsize),
1211 pager_offset + scp->fileoff,
1212 pager_offset + scp->fileoff + scp->filesize,
1213 scp->initprot,
1214 scp->maxprot,
1215 scp->flags));
1216 }
1217
1218 /*
1219 * Make sure what we get from the file is really ours (as specified
1220 * by macho_size).
1221 */
1222 if (scp->fileoff + scp->filesize < scp->fileoff ||
1223 scp->fileoff + scp->filesize > (uint64_t)macho_size) {
1224 return (LOAD_BADMACHO);
1225 }
1226 /*
1227 * Ensure that the number of sections specified would fit
1228 * within the load command size.
1229 */
1230 if (total_section_size / single_section_size < scp->nsects) {
1231 return (LOAD_BADMACHO);
1232 }
1233 /*
1234 * Make sure the segment is page-aligned in the file.
1235 */
1236 file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
1237 file_size = scp->filesize;
1238 if ((file_offset & PAGE_MASK_64) != 0 ||
1239 /* we can't mmap() it if it's not page-aligned in the file */
1240 (file_offset & vm_map_page_mask(map)) != 0) {
1241 /*
1242 * The 1st test would have failed if the system's page size
1243 * was what this process believes is the page size, so let's
1244 * fail here too for the sake of consistency.
1245 */
1246 return (LOAD_BADMACHO);
1247 }
1248
1249 /*
1250 * If we have a code signature attached for this slice,
1251 * require that the segments are within the signed part
1252 * of the file.
1253 */
1254 if (result->cs_end_offset &&
1255 result->cs_end_offset < (off_t)scp->fileoff &&
1256 result->cs_end_offset - scp->fileoff < scp->filesize)
1257 {
1258 if (cs_debug)
1259 printf("section outside code signature\n");
1260 return LOAD_BADMACHO;
1261 }
1262
1263 vm_offset = scp->vmaddr + slide;
1264 vm_size = scp->vmsize;
1265
1266 if (vm_size == 0)
1267 return (LOAD_SUCCESS);
1268 if (scp->vmaddr == 0 &&
1269 file_size == 0 &&
1270 vm_size != 0 &&
1271 (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
1272 (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
1273 /*
1274 * For PIE, extend page zero rather than moving it. Extending
1275 * page zero keeps early allocations from falling predictably
1276 * between the end of page zero and the beginning of the first
1277 * slid segment.
1278 */
1279 /*
1280 * This is a "page zero" segment: it starts at address 0,
1281 * is not mapped from the binary file and is not accessible.
1282 * User-space should never be able to access that memory, so
1283 * make it completely off limits by raising the VM map's
1284 * minimum offset.
1285 */
1286 vm_end = vm_offset + vm_size;
1287 if (vm_end < vm_offset) {
1288 return (LOAD_BADMACHO);
1289 }
1290 if (verbose) {
1291 MACHO_PRINTF(("++++++ load_segment: "
1292 "page_zero up to 0x%llx\n",
1293 (uint64_t) vm_end));
1294 }
1295 {
1296 vm_end = vm_map_round_page(vm_end,
1297 PAGE_MASK_64);
1298 vm_end_aligned = vm_end;
1299 }
1300 ret = vm_map_raise_min_offset(map,
1301 vm_end_aligned);
1302
1303 if (ret != KERN_SUCCESS) {
1304 return (LOAD_FAILURE);
1305 }
1306 return (LOAD_SUCCESS);
1307 } else {
1308 }
1309
1310 {
1311 file_start = vm_map_trunc_page(file_offset,
1312 effective_page_mask);
1313 file_end = vm_map_round_page(file_offset + file_size,
1314 effective_page_mask);
1315 vm_start = vm_map_trunc_page(vm_offset,
1316 effective_page_mask);
1317 vm_end = vm_map_round_page(vm_offset + vm_size,
1318 effective_page_mask);
1319 }
1320
1321 if (vm_start < result->min_vm_addr)
1322 result->min_vm_addr = vm_start;
1323 if (vm_end > result->max_vm_addr)
1324 result->max_vm_addr = vm_end;
1325
1326 if (map == VM_MAP_NULL)
1327 return (LOAD_SUCCESS);
1328
1329 if (vm_size > 0) {
1330 initprot = (scp->initprot) & VM_PROT_ALL;
1331 maxprot = (scp->maxprot) & VM_PROT_ALL;
1332 /*
1333 * Map a copy of the file into the address space.
1334 */
1335 if (verbose) {
1336 MACHO_PRINTF(("++++++ load_segment: "
1337 "mapping at vm [0x%llx:0x%llx] of "
1338 "file [0x%llx:0x%llx]\n",
1339 (uint64_t) vm_start,
1340 (uint64_t) vm_end,
1341 (uint64_t) file_start,
1342 (uint64_t) file_end));
1343 }
1344 ret = map_segment(map,
1345 vm_start,
1346 vm_end,
1347 control,
1348 file_start,
1349 file_end,
1350 initprot,
1351 maxprot);
1352 if (ret) {
1353 return LOAD_NOSPACE;
1354 }
1355
1356 #if FIXME
1357 /*
1358 * If the file didn't end on a page boundary,
1359 * we need to zero the leftover.
1360 */
1361 delta_size = map_size - scp->filesize;
1362 if (delta_size > 0) {
1363 mach_vm_offset_t tmp;
1364
1365 ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE| VM_MAKE_TAG(VM_KERN_MEMORY_BSD));
1366 if (ret != KERN_SUCCESS) {
1367 return(LOAD_RESOURCE);
1368 }
1369
1370 if (copyout(tmp, map_addr + scp->filesize,
1371 delta_size)) {
1372 (void) mach_vm_deallocate(
1373 kernel_map, tmp, delta_size);
1374 return (LOAD_FAILURE);
1375 }
1376
1377 (void) mach_vm_deallocate(kernel_map, tmp, delta_size);
1378 }
1379 #endif /* FIXME */
1380 }
1381
1382 /*
1383 * If the virtual size of the segment is greater
1384 * than the size from the file, we need to allocate
1385 * zero fill memory for the rest.
1386 */
1387 if ((vm_end - vm_start) > (file_end - file_start)) {
1388 delta_size = (vm_end - vm_start) - (file_end - file_start);
1389 } else {
1390 delta_size = 0;
1391 }
1392 if (delta_size > 0) {
1393 mach_vm_offset_t tmp;
1394
1395 tmp = vm_start + (file_end - file_start);
1396 if (verbose) {
1397 MACHO_PRINTF(("++++++ load_segment: "
1398 "delta mapping vm [0x%llx:0x%llx]\n",
1399 (uint64_t) tmp,
1400 (uint64_t) (tmp + delta_size)));
1401 }
1402 kr = map_segment(map,
1403 tmp,
1404 tmp + delta_size,
1405 MEMORY_OBJECT_CONTROL_NULL,
1406 0,
1407 delta_size,
1408 scp->initprot,
1409 scp->maxprot);
1410 if (kr != KERN_SUCCESS) {
1411 return(LOAD_NOSPACE);
1412 }
1413 }
1414
1415 if ( (scp->fileoff == 0) && (scp->filesize != 0) )
1416 result->mach_header = vm_offset;
1417
1418 if (scp->flags & SG_PROTECTED_VERSION_1) {
1419 ret = unprotect_dsmos_segment(file_start,
1420 file_end - file_start,
1421 vp,
1422 pager_offset,
1423 map,
1424 vm_start,
1425 vm_end - vm_start);
1426 if (ret != LOAD_SUCCESS) {
1427 return ret;
1428 }
1429 } else {
1430 ret = LOAD_SUCCESS;
1431 }
1432
1433 if (LOAD_SUCCESS == ret &&
1434 filetype == MH_DYLINKER &&
1435 result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
1436 note_all_image_info_section(scp,
1437 LC_SEGMENT_64 == lcp->cmd,
1438 single_section_size,
1439 ((const char *)lcp +
1440 segment_command_size),
1441 slide,
1442 result);
1443 }
1444
1445 if (result->entry_point != MACH_VM_MIN_ADDRESS) {
1446 if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
1447 if ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) == (VM_PROT_READ|VM_PROT_EXECUTE)) {
1448 result->validentry = 1;
1449 } else {
1450 /* right range but wrong protections, unset if previously validated */
1451 result->validentry = 0;
1452 }
1453 }
1454 }
1455
1456 return ret;
1457 }
1458
1459 static
1460 load_return_t
1461 load_uuid(
1462 struct uuid_command *uulp,
1463 char *command_end,
1464 load_result_t *result
1465 )
1466 {
1467 /*
1468 * We need to check the following for this command:
1469 * - The command size should be at least the size of struct uuid_command
1470 * - The UUID part of the command should be completely within the mach-o header
1471 */
1472
1473 if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
1474 (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
1475 return (LOAD_BADMACHO);
1476 }
1477
1478 memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
1479 return (LOAD_SUCCESS);
1480 }
1481
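/*
 * load_main:
 * Handle an LC_MAIN command.  The kernel records the requested stack
 * size (or the MAXSSIZ default), picks the default stack location for
 * the thread, and marks the image as needing dyld; the entryoff field
 * itself is consumed by dyld, not by the kernel.
 */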
1482 static
1483 load_return_t
1484 load_main(
1485 struct entry_point_command *epc,
1486 thread_t thread,
1487 int64_t slide,
1488 load_result_t *result
1489 )
1490 {
1491 mach_vm_offset_t addr;
1492 kern_return_t ret;
1493
1494 if (epc->cmdsize < sizeof(*epc))
1495 return (LOAD_BADMACHO);
1496 if (result->thread_count != 0) {
1497 return (LOAD_FAILURE);
1498 }
1499
1500 if (thread == THREAD_NULL)
1501 return (LOAD_SUCCESS);
1502
1503 /* LC_MAIN specifies stack size but not location */
1504 if (epc->stacksize) {
1505 result->prog_stack_size = 1;
1506 result->user_stack_size = epc->stacksize;
1507 } else {
1508 result->prog_stack_size = 0;
1509 result->user_stack_size = MAXSSIZ;
1510 }
1511 result->prog_allocated_stack = 0;
1512
1513 /* use default location for stack */
1514 ret = thread_userstackdefault(thread, &addr);
1515 if (ret != KERN_SUCCESS)
1516 return(LOAD_FAILURE);
1517
1518 /* The stack slides down from the default location */
1519 result->user_stack = addr;
1520 result->user_stack -= slide;
1521
1522 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
1523 /* Already processed LC_MAIN or LC_UNIXTHREAD */
1524 return (LOAD_FAILURE);
1525 }
1526
1527 /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
1528 result->needs_dynlinker = TRUE;
1529 result->using_lcmain = TRUE;
1530
1531 ret = thread_state_initialize( thread );
1532 if (ret != KERN_SUCCESS) {
1533 return(LOAD_FAILURE);
1534 }
1535
1536
1537 result->unixproc = TRUE;
1538 result->thread_count++;
1539
1540 return(LOAD_SUCCESS);
1541 }
1542
1543
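/*
 * load_unixthread:
 * Handle an LC_UNIXTHREAD command: extract the initial user stack
 * (which may be a custom, program-allocated stack), the entry point,
 * and the full register state, and apply them to the new thread.
 */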
1544 static
1545 load_return_t
1546 load_unixthread(
1547 struct thread_command *tcp,
1548 thread_t thread,
1549 int64_t slide,
1550 load_result_t *result
1551 )
1552 {
1553 load_return_t ret;
1554 int customstack =0;
1555 mach_vm_offset_t addr;
1556
1557 if (tcp->cmdsize < sizeof(*tcp))
1558 return (LOAD_BADMACHO);
1559 if (result->thread_count != 0) {
1560 return (LOAD_FAILURE);
1561 }
1562
1563 if (thread == THREAD_NULL)
1564 return (LOAD_SUCCESS);
1565
1566 ret = load_threadstack(thread,
1567 (uint32_t *)(((vm_offset_t)tcp) +
1568 sizeof(struct thread_command)),
1569 tcp->cmdsize - sizeof(struct thread_command),
1570 &addr,
1571 &customstack);
1572 if (ret != LOAD_SUCCESS)
1573 return(ret);
1574
1575 /* LC_UNIXTHREAD optionally specifies stack size and location */
1576
1577 if (customstack) {
1578 result->prog_stack_size = 0; /* unknown */
1579 result->prog_allocated_stack = 1;
1580 } else {
1581 result->prog_allocated_stack = 0;
1582 result->prog_stack_size = 0;
1583 result->user_stack_size = MAXSSIZ;
1584 }
1585
1586 /* The stack slides down from the default location */
1587 result->user_stack = addr;
1588 result->user_stack -= slide;
1589
1590 ret = load_threadentry(thread,
1591 (uint32_t *)(((vm_offset_t)tcp) +
1592 sizeof(struct thread_command)),
1593 tcp->cmdsize - sizeof(struct thread_command),
1594 &addr);
1595 if (ret != LOAD_SUCCESS)
1596 return(ret);
1597
1598 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
1599 /* Already processed LC_MAIN or LC_UNIXTHREAD */
1600 return (LOAD_FAILURE);
1601 }
1602
1603 result->entry_point = addr;
1604 result->entry_point += slide;
1605
1606 ret = load_threadstate(thread,
1607 (uint32_t *)(((vm_offset_t)tcp) +
1608 sizeof(struct thread_command)),
1609 tcp->cmdsize - sizeof(struct thread_command));
1610 if (ret != LOAD_SUCCESS)
1611 return (ret);
1612
1613
1614 result->unixproc = TRUE;
1615 result->thread_count++;
1616
1617 return(LOAD_SUCCESS);
1618 }
1619
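/*
 * load_threadstate:
 * Apply the register state carried in a thread command.  The payload
 * is a packed sequence of (flavor, count, count 32-bit words of state)
 * tuples; each tuple is handed to thread_setstatus() for its flavor.
 */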
1620 static
1621 load_return_t
1622 load_threadstate(
1623 thread_t thread,
1624 uint32_t *ts,
1625 uint32_t total_size
1626 )
1627 {
1628 kern_return_t ret;
1629 uint32_t size;
1630 int flavor;
1631 uint32_t thread_size;
1632 uint32_t *local_ts;
1633 uint32_t local_ts_size;
1634
1635 local_ts = NULL;
1636 local_ts_size = 0;
1637
1638 ret = thread_state_initialize( thread );
1639 if (ret != KERN_SUCCESS) {
1640 ret = LOAD_FAILURE;
1641 goto done;
1642 }
1643
1644 if (total_size > 0) {
1645 local_ts_size = total_size;
1646 local_ts = kalloc(local_ts_size);
1647 if (local_ts == NULL) {
1648 ret = LOAD_FAILURE;
1649 goto done;
1650 }
1651 memcpy(local_ts, ts, local_ts_size);
1652 ts = local_ts;
1653 }
1654
1655 /*
1656 * Set the new thread state; iterate through the state flavors in
1657 * the mach-o file.
1658 */
1659 while (total_size > 0) {
1660 flavor = *ts++;
1661 size = *ts++;
1662 if (UINT32_MAX-2 < size ||
1663 UINT32_MAX/sizeof(uint32_t) < size+2) {
1664 ret = LOAD_BADMACHO;
1665 goto done;
1666 }
1667 thread_size = (size+2)*sizeof(uint32_t);
1668 if (thread_size > total_size) {
1669 ret = LOAD_BADMACHO;
1670 goto done;
1671 }
1672 total_size -= thread_size;
1673 /*
1674 * Third argument is a kernel space pointer; it gets cast
1675 * to the appropriate type in machine_thread_set_state()
1676 * based on the value of flavor.
1677 */
1678 ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
1679 if (ret != KERN_SUCCESS) {
1680 ret = LOAD_FAILURE;
1681 goto done;
1682 }
1683 ts += size; /* ts is a (uint32_t *) */
1684 }
1685 ret = LOAD_SUCCESS;
1686
1687 done:
1688 if (local_ts != NULL) {
1689 kfree(local_ts, local_ts_size);
1690 local_ts = NULL;
1691 }
1692 return ret;
1693 }
1694
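/*
 * load_threadstack:
 * Walk the same (flavor, count, state) tuples as load_threadstate(),
 * but only ask thread_userstack() for the initial user stack pointer
 * and whether the binary supplies its own (custom) stack.
 */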
1695 static
1696 load_return_t
1697 load_threadstack(
1698 thread_t thread,
1699 uint32_t *ts,
1700 uint32_t total_size,
1701 mach_vm_offset_t *user_stack,
1702 int *customstack
1703 )
1704 {
1705 kern_return_t ret;
1706 uint32_t size;
1707 int flavor;
1708 uint32_t stack_size;
1709
1710 while (total_size > 0) {
1711 flavor = *ts++;
1712 size = *ts++;
1713 if (UINT32_MAX-2 < size ||
1714 UINT32_MAX/sizeof(uint32_t) < size+2)
1715 return (LOAD_BADMACHO);
1716 stack_size = (size+2)*sizeof(uint32_t);
1717 if (stack_size > total_size)
1718 return(LOAD_BADMACHO);
1719 total_size -= stack_size;
1720
1721 /*
1722 * Third argument is a kernel space pointer; it gets cast
1723 * to the appropriate type in thread_userstack() based on
1724 * the value of flavor.
1725 */
1726 ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
1727 if (ret != KERN_SUCCESS) {
1728 return(LOAD_FAILURE);
1729 }
1730 ts += size; /* ts is a (uint32_t *) */
1731 }
1732 return(LOAD_SUCCESS);
1733 }
1734
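/*
 * load_threadentry:
 * Walk the (flavor, count, state) tuples and ask thread_entrypoint()
 * for the initial program counter encoded in the thread state.
 */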
1735 static
1736 load_return_t
1737 load_threadentry(
1738 thread_t thread,
1739 uint32_t *ts,
1740 uint32_t total_size,
1741 mach_vm_offset_t *entry_point
1742 )
1743 {
1744 kern_return_t ret;
1745 uint32_t size;
1746 int flavor;
1747 uint32_t entry_size;
1748
1749 /*
1750 * Set the thread state.
1751 */
1752 *entry_point = MACH_VM_MIN_ADDRESS;
1753 while (total_size > 0) {
1754 flavor = *ts++;
1755 size = *ts++;
1756 if (UINT32_MAX-2 < size ||
1757 UINT32_MAX/sizeof(uint32_t) < size+2)
1758 return (LOAD_BADMACHO);
1759 entry_size = (size+2)*sizeof(uint32_t);
1760 if (entry_size > total_size)
1761 return(LOAD_BADMACHO);
1762 total_size -= entry_size;
1763 /*
1764 * Third argument is a kernel space pointer; it gets cast
1765 * to the appropriate type in thread_entrypoint() based on
1766 * the value of flavor.
1767 */
1768 ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
1769 if (ret != KERN_SUCCESS) {
1770 return(LOAD_FAILURE);
1771 }
1772 ts += size; /* ts is a (uint32_t *) */
1773 }
1774 return(LOAD_SUCCESS);
1775 }
1776
1777 struct macho_data {
1778 struct nameidata __nid;
1779 union macho_vnode_header {
1780 struct mach_header mach_header;
1781 struct fat_header fat_header;
1782 char __pad[512];
1783 } __header;
1784 };
1785
1786 #define DEFAULT_DYLD_PATH "/usr/lib/dyld"
1787
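/*
 * load_dylinker:
 * Load the dynamic linker named by LC_LOAD_DYLINKER.  First try to map
 * it at its preferred addresses; if something is already in the way
 * (LOAD_NOSPACE), preflight its VM range with a NULL map, find a free
 * range of that size, and re-parse with the resulting slide.
 */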
1788 static load_return_t
1789 load_dylinker(
1790 struct dylinker_command *lcp,
1791 integer_t archbits,
1792 vm_map_t map,
1793 thread_t thread,
1794 int depth,
1795 int64_t slide,
1796 load_result_t *result
1797 )
1798 {
1799 char *name;
1800 char *p;
1801 struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
1802 struct mach_header *header;
1803 off_t file_offset = 0; /* set by get_macho_vnode() */
1804 off_t macho_size = 0; /* set by get_macho_vnode() */
1805 load_result_t *myresult;
1806 kern_return_t ret;
1807 struct macho_data *macho_data;
1808 struct {
1809 struct mach_header __header;
1810 load_result_t __myresult;
1811 struct macho_data __macho_data;
1812 } *dyld_data;
1813
1814 if (lcp->cmdsize < sizeof(*lcp))
1815 return (LOAD_BADMACHO);
1816
1817 name = (char *)lcp + lcp->name.offset;
1818 /*
1819 * Check for a proper null terminated string.
1820 */
1821 p = name;
1822 do {
1823 if (p >= (char *)lcp + lcp->cmdsize)
1824 return(LOAD_BADMACHO);
1825 } while (*p++);
1826
1827 #if !(DEVELOPMENT || DEBUG)
1828 if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
1829 return (LOAD_BADMACHO);
1830 }
1831 #endif
1832
1833 /* Allocate wad-of-data from heap to reduce excessively deep stacks */
1834
1835 MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK);
1836 header = &dyld_data->__header;
1837 myresult = &dyld_data->__myresult;
1838 macho_data = &dyld_data->__macho_data;
1839
1840 ret = get_macho_vnode(name, archbits, header,
1841 &file_offset, &macho_size, macho_data, &vp);
1842 if (ret)
1843 goto novp_out;
1844
1845 *myresult = load_result_null;
1846
1847 /*
1848 * First try to map dyld in directly. This should work most of
1849 * the time since there shouldn't normally be something already
1850 * mapped to its address.
1851 */
1852
1853 ret = parse_machfile(vp, map, thread, header, file_offset,
1854 macho_size, depth, slide, 0, myresult);
1855
1856 /*
1857 * If it turned out something was in the way, then we'll take
1858 * this longer path to preflight dyld's vm ranges, then
1859 * map it at a free location in the address space.
1860 */
1861
1862 if (ret == LOAD_NOSPACE) {
1863 mach_vm_offset_t dyl_start, map_addr;
1864 mach_vm_size_t dyl_length;
1865 int64_t slide_amount;
1866
1867 *myresult = load_result_null;
1868
1869 /*
1870 * Preflight parsing the Mach-O file with a NULL
1871 * map, which will return the ranges needed for a
1872 * subsequent map attempt (with a slide) in "myresult"
1873 */
1874 ret = parse_machfile(vp, VM_MAP_NULL, THREAD_NULL, header,
1875 file_offset, macho_size, depth,
1876 0 /* slide */, 0, myresult);
1877
1878 if (ret != LOAD_SUCCESS) {
1879 goto out;
1880 }
1881
1882 dyl_start = myresult->min_vm_addr;
1883 dyl_length = myresult->max_vm_addr - myresult->min_vm_addr;
1884
1885 dyl_length += slide;
1886
1887 /* To find an appropriate load address, do a quick allocation */
1888 map_addr = dyl_start;
1889 ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
1890 if (ret != KERN_SUCCESS) {
1891 ret = LOAD_NOSPACE;
1892 goto out;
1893 }
1894
1895 ret = mach_vm_deallocate(map, map_addr, dyl_length);
1896 if (ret != KERN_SUCCESS) {
1897 ret = LOAD_NOSPACE;
1898 goto out;
1899 }
1900
1901 if (map_addr < dyl_start)
1902 slide_amount = -(int64_t)(dyl_start - map_addr);
1903 else
1904 slide_amount = (int64_t)(map_addr - dyl_start);
1905
1906 slide_amount += slide;
1907
1908 *myresult = load_result_null;
1909
1910 ret = parse_machfile(vp, map, thread, header,
1911 file_offset, macho_size, depth,
1912 slide_amount, 0, myresult);
1913
1914 if (ret) {
1915 goto out;
1916 }
1917 }
1918
1919 if (ret == LOAD_SUCCESS) {
1920 result->dynlinker = TRUE;
1921 result->entry_point = myresult->entry_point;
1922 result->validentry = myresult->validentry;
1923 result->all_image_info_addr = myresult->all_image_info_addr;
1924 result->all_image_info_size = myresult->all_image_info_size;
1925 if (myresult->platform_binary) {
1926 result->csflags |= CS_DYLD_PLATFORM;
1927 }
1928 }
1929 out:
1930 vnode_put(vp);
1931 novp_out:
1932 FREE(dyld_data, M_TEMP);
1933 return (ret);
1934
1935 }
1936
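/*
 * load_code_signature:
 * Handle LC_CODE_SIGNATURE: reuse a matching code-signing blob already
 * registered for this vnode/cputype, or read the blob from the file
 * into a kernel buffer and register it via ubc_cs_blob_add(), then
 * propagate the resulting csflags into the load result.
 */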
1937 static load_return_t
1938 load_code_signature(
1939 struct linkedit_data_command *lcp,
1940 struct vnode *vp,
1941 off_t macho_offset,
1942 off_t macho_size,
1943 cpu_type_t cputype,
1944 load_result_t *result)
1945 {
1946 int ret;
1947 kern_return_t kr;
1948 vm_offset_t addr;
1949 int resid;
1950 struct cs_blob *blob;
1951 int error;
1952 vm_size_t blob_size;
1953
1954 addr = 0;
1955 blob = NULL;
1956
1957 if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
1958 lcp->dataoff + lcp->datasize > macho_size) {
1959 ret = LOAD_BADMACHO;
1960 goto out;
1961 }
1962
1963 blob = ubc_cs_blob_get(vp, cputype, macho_offset);
1964 if (blob != NULL) {
1965 /* we already have a blob for this vnode and cputype */
1966 if (blob->csb_cpu_type == cputype &&
1967 blob->csb_base_offset == macho_offset &&
1968 blob->csb_mem_size == lcp->datasize) {
1969 /* it matches the blob we want here, let's verify the version */
1970 if(0 != ubc_cs_generation_check(vp)) {
1971 if (0 != ubc_cs_blob_revalidate(vp, blob, 0)) {
1972 ret = LOAD_FAILURE; /* set error same as from ubc_cs_blob_add */
1973 goto out;
1974 }
1975 }
1976 ret = LOAD_SUCCESS;
1977 } else {
1978 /* the blob has changed for this vnode: fail ! */
1979 ret = LOAD_BADMACHO;
1980 }
1981 goto out;
1982 }
1983
1984 blob_size = lcp->datasize;
1985 kr = ubc_cs_blob_allocate(&addr, &blob_size);
1986 if (kr != KERN_SUCCESS) {
1987 ret = LOAD_NOSPACE;
1988 goto out;
1989 }
1990
1991 resid = 0;
1992 error = vn_rdwr(UIO_READ,
1993 vp,
1994 (caddr_t) addr,
1995 lcp->datasize,
1996 macho_offset + lcp->dataoff,
1997 UIO_SYSSPACE,
1998 0,
1999 kauth_cred_get(),
2000 &resid,
2001 current_proc());
2002 if (error || resid != 0) {
2003 ret = LOAD_IOERROR;
2004 goto out;
2005 }
2006
2007 if (ubc_cs_blob_add(vp,
2008 cputype,
2009 macho_offset,
2010 addr,
2011 lcp->datasize,
2012 0,
2013 &blob)) {
2014 ret = LOAD_FAILURE;
2015 goto out;
2016 } else {
2017 /* ubc_cs_blob_add() has consumed "addr" */
2018 addr = 0;
2019 }
2020
2021 #if CHECK_CS_VALIDATION_BITMAP
2022 ubc_cs_validation_bitmap_allocate( vp );
2023 #endif
2024
2025 ret = LOAD_SUCCESS;
2026 out:
2027 if (ret == LOAD_SUCCESS) {
2028 if (blob == NULL)
2029 panic("sucess, but no blob!");
2030
2031 result->csflags |= blob->csb_flags;
2032 result->platform_binary = blob->csb_platform_binary;
2033 result->cs_end_offset = blob->csb_end_offset;
2034 }
2035 if (addr != 0) {
2036 ubc_cs_blob_deallocate(addr, blob_size);
2037 addr = 0;
2038 }
2039
2040 return ret;
2041 }
2042
2043
2044 #if CONFIG_CODE_DECRYPTION
2045
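/*
 * set_code_unprotect:
 * Handle LC_ENCRYPTION_INFO / LC_ENCRYPTION_INFO_64: create a text
 * decrypter for the named crypt type, rescan the load commands to find
 * the segment containing the encrypted range, and remap that range
 * through vm_map_apple_protected() using the decrypter.
 */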
2046 static load_return_t
2047 set_code_unprotect(
2048 struct encryption_info_command *eip,
2049 caddr_t addr,
2050 vm_map_t map,
2051 int64_t slide,
2052 struct vnode *vp,
2053 off_t macho_offset,
2054 cpu_type_t cputype,
2055 cpu_subtype_t cpusubtype)
2056 {
2057 int error, len;
2058 pager_crypt_info_t crypt_info;
2059 const char * cryptname = 0;
2060 char *vpath;
2061
2062 size_t offset;
2063 struct segment_command_64 *seg64;
2064 struct segment_command *seg32;
2065 vm_map_offset_t map_offset, map_size;
2066 vm_object_offset_t crypto_backing_offset;
2067 kern_return_t kr;
2068
2069 if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO;
2070
2071 switch(eip->cryptid) {
2072 case 0:
2073 /* not encrypted, just an empty load command */
2074 return LOAD_SUCCESS;
2075 case 1:
2076 cryptname="com.apple.unfree";
2077 break;
2078 case 0x10:
2079 /* some random cryptid that you could manually put into
2080 * your binary if you want NULL */
2081 cryptname="com.apple.null";
2082 break;
2083 default:
2084 return LOAD_BADMACHO;
2085 }
2086
2087 if (map == VM_MAP_NULL) return (LOAD_SUCCESS);
2088 if (NULL == text_crypter_create) return LOAD_FAILURE;
2089
2090 MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
2091 if(vpath == NULL) return LOAD_FAILURE;
2092
2093 len = MAXPATHLEN;
2094 error = vn_getpath(vp, vpath, &len);
2095 if (error) {
2096 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
2097 return LOAD_FAILURE;
2098 }
2099
2100 /* set up decrypter first */
2101 crypt_file_data_t crypt_data = {
2102 .filename = vpath,
2103 .cputype = cputype,
2104 .cpusubtype = cpusubtype};
2105 kr=text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
2106 #if DEVELOPMENT || DEBUG
2107 struct proc *p;
2108 p = current_proc();
2109 printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
2110 p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr);
2111 #endif /* DEVELOPMENT || DEBUG */
2112 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
2113
2114 if (kr) {
2115 printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
2116 cryptname, kr);
2117 if (kr == kIOReturnNotPrivileged) {
2118 /* the text decrypter reported a decryption failure */
2119 return(LOAD_DECRYPTFAIL);
2120 } else
2121 return LOAD_RESOURCE;
2122 }
2123
2124 /* this is terrible, but we have to rescan the load commands to find the
2125 * virtual address of the encrypted range. This code is going to look like
2126 * the dyld source one day... */
2127 struct mach_header *header = (struct mach_header *)addr;
2128 size_t mach_header_sz = sizeof(struct mach_header);
2129 if (header->magic == MH_MAGIC_64 ||
2130 header->magic == MH_CIGAM_64) {
2131 mach_header_sz = sizeof(struct mach_header_64);
2132 }
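/*
 * Walk the load commands looking for the segment whose file extent
 * covers [cryptoff, cryptoff + cryptsize); its vmaddr (plus slide)
 * gives the virtual range that has to be remapped through the
 * decrypting pager.
 */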
2133 offset = mach_header_sz;
2134 uint32_t ncmds = header->ncmds;
2135 while (ncmds--) {
2136 /*
2137 * Get a pointer to the command.
2138 */
2139 struct load_command *lcp = (struct load_command *)(addr + offset);
2140 offset += lcp->cmdsize;
2141
2142 switch(lcp->cmd) {
2143 case LC_SEGMENT_64:
2144 seg64 = (struct segment_command_64 *)lcp;
2145 if ((seg64->fileoff <= eip->cryptoff) &&
2146 (seg64->fileoff+seg64->filesize >=
2147 eip->cryptoff+eip->cryptsize)) {
2148 map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
2149 map_size = eip->cryptsize;
2150 crypto_backing_offset = macho_offset + eip->cryptoff;
2151 goto remap_now;
2152 }
break; /* don't fall through and reinterpret a 64-bit segment as 32-bit */
2153 case LC_SEGMENT:
2154 seg32 = (struct segment_command *)lcp;
2155 if ((seg32->fileoff <= eip->cryptoff) &&
2156 (seg32->fileoff+seg32->filesize >=
2157 eip->cryptoff+eip->cryptsize)) {
2158 map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
2159 map_size = eip->cryptsize;
2160 crypto_backing_offset = macho_offset + eip->cryptoff;
2161 goto remap_now;
2162 }
2163 }
2164 }
2165
2166 /* if we get here, we did not find a matching segment */
2167 return LOAD_BADMACHO;
2168
2169 remap_now:
2170 /* now remap using the decrypter */
2171 MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
2172 (uint64_t) map_offset,
2173 (uint64_t) (map_offset+map_size)));
2174 kr = vm_map_apple_protected(map,
2175 map_offset,
2176 map_offset+map_size,
2177 crypto_backing_offset,
2178 &crypt_info);
2179 if (kr) {
2180 printf("set_code_unprotect(): mapping failed with %x\n", kr);
2181 return LOAD_PROTECT;
2182 }
2183
2184 return LOAD_SUCCESS;
2185 }
2186
2187 #endif
2188
2189 /*
2190 * This routine exists to support load_dylinker().
2191 *
2192 * This routine has its own, separate, understanding of the FAT file format,
2193 * which is terrifically unfortunate.
2194 */
2195 static
2196 load_return_t
2197 get_macho_vnode(
2198 char *path,
2199 integer_t archbits,
2200 struct mach_header *mach_header,
2201 off_t *file_offset,
2202 off_t *macho_size,
2203 struct macho_data *data,
2204 struct vnode **vpp
2205 )
2206 {
2207 struct vnode *vp;
2208 vfs_context_t ctx = vfs_context_current();
2209 proc_t p = vfs_context_proc(ctx);
2210 kauth_cred_t kerncred;
2211 struct nameidata *ndp = &data->__nid;
2212 boolean_t is_fat;
2213 struct fat_arch fat_arch;
2214 int error;
2215 int resid;
2216 union macho_vnode_header *header = &data->__header;
2217 off_t fsize = (off_t)0;
2218
2219 /*
2220 * Capture the kernel credential for use in the actual read of the
2221 * file, since the user doing the execution may have execute rights
2222 * but not read rights, but to exec something, we have to either map
2223 * or read it into the new process address space, which requires
2224 * read rights. This is to deal with lack of common credential
2225 * serialization code which would treat NOCRED as "serialize 'root'".
2226 */
2227 kerncred = vfs_context_ucred(vfs_context_kernel());
2228
2229 /* init the namei data to point at the program file's path */
2230 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
2231
2232 if ((error = namei(ndp)) != 0) {
2233 if (error == ENOENT) {
2234 error = LOAD_ENOENT;
2235 } else {
2236 error = LOAD_FAILURE;
2237 }
2238 return(error);
2239 }
2240 nameidone(ndp);
2241 vp = ndp->ni_vp;
2242
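/*
 * Sanity-check the vnode before reading any headers: it must be a
 * regular file on a mount that allows execution, and the caller must
 * hold both execute and read rights on it before we open and read it.
 */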
2243 /* check for regular file */
2244 if (vp->v_type != VREG) {
2245 error = LOAD_PROTECT;
2246 goto bad1;
2247 }
2248
2249 /* get size */
2250 if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
2251 error = LOAD_FAILURE;
2252 goto bad1;
2253 }
2254
2255 /* Check mount point */
2256 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
2257 error = LOAD_PROTECT;
2258 goto bad1;
2259 }
2260
2261 /* check access */
2262 if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
2263 error = LOAD_PROTECT;
2264 goto bad1;
2265 }
2266
2267 /* try to open it */
2268 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
2269 error = LOAD_PROTECT;
2270 goto bad1;
2271 }
2272
2273 if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof (*header), 0,
2274 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
2275 error = LOAD_IOERROR;
2276 goto bad2;
2277 }
2278
2279 if (resid) {
2280 error = LOAD_BADMACHO;
2281 goto bad2;
2282 }
2283
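/*
 * A native Mach-O magic (32- or 64-bit) means a thin file; a
 * big-endian FAT magic means we have to pick a slice out of a fat file.
 */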
2284 if (header->mach_header.magic == MH_MAGIC ||
2285 header->mach_header.magic == MH_MAGIC_64) {
2286 is_fat = FALSE;
2287 } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
2288 is_fat = TRUE;
2289 } else {
2290 error = LOAD_BADMACHO;
2291 goto bad2;
2292 }
2293
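/*
 * Fat binary: validate the architecture table, select the slice that
 * matches the requested archbits, and read that slice's Mach-O header
 * so the caller gets the slice's file offset and size.
 */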
2294 if (is_fat) {
2295
2296 error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
2297 sizeof(*header));
2298 if (error != LOAD_SUCCESS) {
2299 goto bad2;
2300 }
2301
2302 /* Look up our architecture in the fat file. */
2303 error = fatfile_getarch_with_bits(archbits,
2304 (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch);
2305 if (error != LOAD_SUCCESS)
2306 goto bad2;
2307
2308 /* Read the Mach-O header out of it */
2309 error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
2310 sizeof (header->mach_header), fat_arch.offset,
2311 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
2312 if (error) {
2313 error = LOAD_IOERROR;
2314 goto bad2;
2315 }
2316
2317 if (resid) {
2318 error = LOAD_BADMACHO;
2319 goto bad2;
2320 }
2321
2322 /* Is this really a Mach-O? */
2323 if (header->mach_header.magic != MH_MAGIC &&
2324 header->mach_header.magic != MH_MAGIC_64) {
2325 error = LOAD_BADMACHO;
2326 goto bad2;
2327 }
2328
2329 *file_offset = fat_arch.offset;
2330 *macho_size = fat_arch.size;
2331 } else {
2332 /*
2333 * Force get_macho_vnode() to fail if the architecture bits
2334 * do not match the expected architecture bits. This in
2335 * turn causes load_dylinker() to fail for the same reason,
2336 * so it ensures the dynamic linker and the binary are in
2337 * lock-step. This is potentially bad, if we ever add to
2338 * the CPU_ARCH_* bits any bits that are desirable but not
2339 * required, since the dynamic linker might work, but we will
2340 * refuse to load it because of this check.
2341 */
2342 if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
2343 error = LOAD_BADARCH;
2344 goto bad2;
2345 }
2346
2347 *file_offset = 0;
2348 *macho_size = fsize;
2349 }
2350
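/* success: hand back the header, the slice geometry and the opened vnode */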
2351 *mach_header = header->mach_header;
2352 *vpp = vp;
2353
2354 ubc_setsize(vp, fsize);
2355 return (error);
2356
2357 bad2:
2358 (void) VNOP_CLOSE(vp, FREAD, ctx);
2359 bad1:
2360 vnode_put(vp);
2361 return(error);
2362 }