/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>
#include <sys/codesign.h>
#include <sys/proc_uuid_policy.h>
#include <sys/reason.h>
#include <sys/kdebug.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>        /* vm_allocate() */
#include <mach/mach_vm.h>       /* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>
#include <machine/exec.h>
#include <machine/pal_routines.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/page_decrypt.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_protos.h>
#include <IOKit/IOReturn.h>     /* for kIOReturnNotPrivileged */

#include <os/overflow.h>
/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t pmap_create_options(ledger_t ledger, vm_map_size_t size,
    unsigned int flags);

/* XXX should have prototypes in a shared header file */
extern int get_map_nentries(vm_map_t);

extern kern_return_t memory_object_signed(memory_object_control_t control,
    boolean_t is_signed);
/* An empty load_result_t */
static const load_result_t load_result_null = {
	.mach_header = MACH_VM_MIN_ADDRESS,
	.entry_point = MACH_VM_MIN_ADDRESS,
	.user_stack = MACH_VM_MIN_ADDRESS,
	.user_stack_size = 0,
	.user_stack_alloc = MACH_VM_MIN_ADDRESS,
	.user_stack_alloc_size = 0,
	.all_image_info_addr = MACH_VM_MIN_ADDRESS,
	.all_image_info_size = 0,
	.needs_dynlinker = 0,
	.min_vm_addr = MACH_VM_MAX_ADDRESS,
	.max_vm_addr = MACH_VM_MIN_ADDRESS,
};
/*
 * Prototypes of static functions.
 */
static load_return_t parse_machfile(struct vnode *vp, vm_map_t map,
    thread_t thread, struct mach_header *header, off_t file_offset,
    off_t macho_size, int depth, int64_t slide, int64_t dyld_slide,
    load_result_t *result, load_result_t *binresult,
    struct image_params *imgp);

static load_return_t load_segment(struct load_command *lcp, uint32_t filetype,
    void *control, off_t pager_offset, off_t macho_size, struct vnode *vp,
    vm_map_t map, int64_t slide, load_result_t *result);

static load_return_t load_uuid(struct uuid_command *uulp, char *command_end,
    load_result_t *result);

static load_return_t load_version(struct version_min_command *vmc,
    boolean_t *found_version_cmd, load_result_t *result);

static load_return_t load_code_signature(struct linkedit_data_command *lcp,
    struct vnode *vp, off_t macho_offset, off_t macho_size,
    cpu_type_t cputype, load_result_t *result, struct image_params *imgp);

#if CONFIG_CODE_DECRYPTION
static load_return_t set_code_unprotect(struct encryption_info_command *lcp,
    caddr_t addr, vm_map_t map, int64_t slide, struct vnode *vp,
    off_t macho_offset, cpu_type_t cputype, cpu_subtype_t cpusubtype);
#endif

static load_return_t load_main(struct entry_point_command *epc,
    thread_t thread, int64_t slide, load_result_t *result);

static load_return_t setup_driver_main(thread_t thread, int64_t slide,
    load_result_t *result);

static load_return_t load_unixthread(struct thread_command *tcp,
    thread_t thread, int64_t slide, load_result_t *result);

static load_return_t load_threadstate(thread_t thread, uint32_t *ts,
    uint32_t total_size, load_result_t *result);

static load_return_t load_threadstack(thread_t thread, uint32_t *ts,
    uint32_t total_size, mach_vm_offset_t *user_stack, int *customstack,
    load_result_t *result);

static load_return_t load_threadentry(thread_t thread, uint32_t *ts,
    uint32_t total_size, mach_vm_offset_t *entry_point);

static load_return_t load_dylinker(struct dylinker_command *lcp,
    integer_t archbits, vm_map_t map, thread_t thread, int depth,
    int64_t slide, load_result_t *result, struct image_params *imgp);

#if __x86_64__
extern int bootarg_no32exec;
static boolean_t check_if_simulator_binary(struct image_params *imgp,
    off_t file_offset, off_t macho_size);
#endif

struct macho_data;

static load_return_t get_macho_vnode(const char *path, integer_t archbits,
    struct mach_header *mach_header, off_t *file_offset, off_t *macho_size,
    struct macho_data *macho_data, struct vnode **vpp);
static void
widen_segment_command(const struct segment_command *scp32,
    struct segment_command_64 *scp)
{
	scp->cmd = scp32->cmd;
	scp->cmdsize = scp32->cmdsize;
	bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
	scp->vmaddr = scp32->vmaddr;
	scp->vmsize = scp32->vmsize;
	scp->fileoff = scp32->fileoff;
	scp->filesize = scp32->filesize;
	scp->maxprot = scp32->maxprot;
	scp->initprot = scp32->initprot;
	scp->nsects = scp32->nsects;
	scp->flags = scp32->flags;
}
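
/*
 * Note: widening 32-bit LC_SEGMENT commands into the segment_command_64
 * layout up front lets the rest of the loader (load_segment() and friends)
 * operate on a single representation; every field either has the same
 * width or zero-extends safely into the 64-bit struct.
 */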
static void
note_all_image_info_section(const struct segment_command_64 *scp,
    boolean_t is64, size_t section_size, const void *sections,
    int64_t slide, load_result_t *result)
{
	const union {
		struct section s32;
		struct section_64 s64;
	} *sectionp;
	unsigned int i;

	if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) {
		return;
	}
	for (i = 0; i < scp->nsects; ++i) {
		sectionp = (const void *)
		    ((const char *)sections + section_size * i);
		if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
		    sizeof(sectionp->s64.sectname))) {
			result->all_image_info_addr =
			    is64 ? sectionp->s64.addr : sectionp->s32.addr;
			result->all_image_info_addr += slide;
			result->all_image_info_size =
			    is64 ? sectionp->s64.size : sectionp->s32.size;
			return;
		}
	}
}
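
/*
 * Context for the scan above (editorial note): dyld publishes its image
 * bookkeeping structure and marks its location with a
 * __DATA,__all_image_info section; recording the slid address and size in
 * the load result is what lets later consumers, such as debuggers, locate
 * it. That consumer list is illustrative, not exhaustive.
 */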
#if __arm64__
/*
 * Allow bypassing some security rules (hard pagezero, no write+execute)
 * in exchange for better binary compatibility for legacy apps built
 * before 16KB-alignment was enforced.
 */
const int fourk_binary_compatibility_unsafe = TRUE;
const int fourk_binary_compatibility_allow_wx = FALSE;
#endif /* __arm64__ */
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thread,
	vm_map_t		*mapp,
	load_result_t		*result)
{
	struct vnode	*vp = imgp->ip_vp;
	off_t		file_offset = imgp->ip_arch_offset;
	off_t		macho_size = imgp->ip_arch_size;
	off_t		file_size = imgp->ip_vattr->va_data_size;
	pmap_t		pmap = 0; /* protected by create_map */
	vm_map_t	map;
	load_result_t	myresult;
	load_return_t	lret;
	boolean_t	enforce_hard_pagezero = TRUE;
	int		in_exec = (imgp->ip_flags & IMGPF_EXEC);
	task_t		task = current_task();
	task_t		ledger_task;
	int64_t		aslr_page_offset = 0;
	int64_t		dyld_aslr_page_offset = 0;
	int64_t		aslr_section_size = 0;
	int64_t		aslr_section_offset = 0;
	kern_return_t	kret;
	unsigned int	pmap_flags = 0;

	if (macho_size > file_size) {
		return LOAD_BADMACHO;
	}

	result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
	result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
#if defined(HAS_APPLE_PAC)
	pmap_flags |= (imgp->ip_flags & IMGPF_NOJOP) ? PMAP_CREATE_DISABLE_JOP : 0;
#endif /* defined(HAS_APPLE_PAC) */
	pmap_flags |= result->is_64bit_addr ? PMAP_CREATE_64BIT : 0;

	if (imgp->ip_new_thread) {
		ledger_task = get_threadtask(imgp->ip_new_thread);
	} else {
		ledger_task = task;
	}
	pmap = pmap_create_options(get_task_ledger(ledger_task),
	    (vm_map_size_t) 0,
	    pmap_flags);
	if (pmap == NULL) {
		return LOAD_RESOURCE;
	}
	map = vm_map_create(pmap,
	    0,
	    vm_compute_max_offset(result->is_64bit_addr),
	    TRUE);
#if defined(__arm64__)
	if (result->is_64bit_addr) {
		/* enforce 16KB alignment of VM map entries */
		vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
	} else {
		vm_map_set_page_shift(map, page_shift_user32);
	}
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
	/* enforce 16KB alignment for watch targets with new ABI */
	vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */

#ifndef CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which allows
	 * circumventing Code Signing Enforcement. The per-process
	 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
	 * global flag.
	 */
	if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) {
		vm_map_disable_NX(map);
		// TODO: Message Trace or log that this is happening
	}
#endif

	/* Forcibly disallow execution from data pages, even if the arch
	 * normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) {
		vm_map_disallow_data_exec(map);
	}

	/*
	 * Compute a random offset for ASLR, and an independent random offset for dyld.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
		aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;

		aslr_page_offset = random();
		aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
		aslr_page_offset <<= vm_map_page_shift(map);

		dyld_aslr_page_offset = random();
		dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
		dyld_aslr_page_offset <<= vm_map_page_shift(map);

		aslr_page_offset += aslr_section_offset;
	}
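
	/*
	 * Worked example (illustrative numbers, not guarantees): with 16KB
	 * pages (page shift 14) and a maximum slide of 0x1000 pages, the
	 * main binary's slide is a random multiple of 0x4000 below 64MB.
	 * dyld gets its own, independently drawn slide, so learning one
	 * layout does not reveal the other.
	 */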
	*result = load_result_null;

	/*
	 * re-set the bitness on the load result since we cleared the load result above.
	 */
	result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
	result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
	    0, aslr_page_offset, dyld_aslr_page_offset, result,
	    NULL, imgp);

	if (lret != LOAD_SUCCESS) {
		vm_map_deallocate(map); /* will lose pmap reference too */
		return lret;
	}

#if __x86_64__
	/*
	 * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
	 */
	if (!result->is_64bit_addr) {
		enforce_hard_pagezero = FALSE;
	}

	/*
	 * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
	 * to the start address for "anywhere" memory allocations.
	 */
#define VM_MAP_HIGH_START_BITS_COUNT 8
#define VM_MAP_HIGH_START_BITS_SHIFT 27
	if (result->is_64bit_addr &&
	    (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
		int random_bits;
		vm_map_offset_t high_start;

		random_bits = random();
		random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1;
		high_start = (((vm_map_offset_t)random_bits)
		    << VM_MAP_HIGH_START_BITS_SHIFT);
		vm_map_set_high_start(map, high_start);
	}
#endif /* __x86_64__ */
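
	/*
	 * Illustrative arithmetic: 8 random bits shifted left by 27 places
	 * the "anywhere" floor at one of 256 slots spaced 128MB apart, i.e.
	 * anywhere within roughly the first 32GB of the address space.
	 */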
	/*
	 * Check to see if the page zero is enforced by the map->min_offset.
	 */
	if (enforce_hard_pagezero &&
	    (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
#if __arm64__
		if (!result->is_64bit_addr && /* not 64-bit address space */
		    !(header->flags & MH_PIE) && /* not PIE */
		    (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
		    PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
		    result->has_pagezero && /* has a "soft" page zero */
		    fourk_binary_compatibility_unsafe) {
			/*
			 * For backwards compatibility of "4K" apps on
			 * a 16K system, do not enforce a hard page zero...
			 */
		} else
#endif /* __arm64__ */
		{
			vm_map_deallocate(map); /* will lose pmap reference too */
			return LOAD_BADMACHO;
		}
	}

	vm_commit_pagezero_status(map);

	/*
	 * If this is an exec, then we are going to destroy the old
	 * task, and it's correct to halt it; if it's spawn, the
	 * task is not yet running, and it makes no sense.
	 */
	if (in_exec) {
		proc_t p = vfs_context_proc(imgp->ip_vfs_context);

		/*
		 * Mark the task as halting and start the other
		 * threads towards terminating themselves. Then
		 * make sure any threads waiting for a process
		 * transition get informed that we are committed to
		 * this transition, and then finally complete the
		 * task halting (wait for threads and then cleanup
		 * of task stoppage).
		 *
		 * NOTE: task_start_halt() makes sure that no new
		 * threads are created in the task during the transition.
		 * We need to mark the workqueue as exiting before we
		 * wait for threads to terminate (at the end of which
		 * we no longer have a prohibition on thread creation).
		 *
		 * Finally, clean up any lingering workqueue data structures
		 * that may have been left behind by the workqueue threads
		 * as they exited (and then clean up the work queue itself).
		 */
		kret = task_start_halt(task);
		if (kret != KERN_SUCCESS) {
			vm_map_deallocate(map); /* will lose pmap reference too */
			return LOAD_FAILURE;
		}
		proc_transcommit(p, 0);
		workq_mark_exiting(p);
		task_complete_halt(task);
		workq_exit(p);

		/*
		 * Roll up accounting info to new task. The roll up is done after
		 * task_complete_halt to make sure the thread accounting info is
		 * rolled up to current_task.
		 */
		task_rollup_accounting_info(get_threadtask(thread), task);
	}
	*mapp = map;

#ifdef CONFIG_32BIT_TELEMETRY
	if (!result->is_64bit_data) {
		/*
		 * This may not need to be an AST; we merely need to ensure that
		 * we gather telemetry at the point where all of the information
		 * that we want has been added to the process.
		 */
		task_set_32bit_log_flag(get_threadtask(thread));
		act_set_astbsd(thread);
	}
#endif /* CONFIG_32BIT_TELEMETRY */

	return LOAD_SUCCESS;
}
int macho_printf = 0;
#define MACHO_PRINTF(args)              \
	do {                            \
		if (macho_printf) {     \
			printf args;    \
		}                       \
	} while (0)
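
/*
 * The do { ... } while (0) wrapper makes MACHO_PRINTF() behave as a single
 * statement, so it composes safely with if/else bodies that omit braces.
 */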
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself. We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s). We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	int64_t			dyld_aslr_offset,
	load_result_t		*result,
	load_result_t		*binresult,
	struct image_params	*imgp)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void			*control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	vm_size_t		alloc_size, cmds_size;
	size_t			offset;
	size_t			oldoffset;      /* for overflow check */
	int			pass;
	proc_t			p = vfs_context_proc(imgp->ip_vfs_context);
	int			error;
	int			resid = 0;
	int			spawn = (imgp->ip_flags & IMGPF_SPAWN);
	int			vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	boolean_t		found_header_segment = FALSE;
	boolean_t		found_xhdr = FALSE;
	boolean_t		found_version_cmd = FALSE;
	int64_t			slide = 0;
	boolean_t		dyld_no_load_addr = FALSE;
	boolean_t		is_dyld = FALSE;
	vm_map_offset_t		effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
#if __arm64__
	uint32_t		pagezero_end = 0;
	uint32_t		executable_end = 0;
	uint32_t		writable_start = 0;
	vm_map_size_t		effective_page_size;

	effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
#endif /* __arm64__ */
	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 1) {
		return LOAD_FAILURE;
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
	    !grade_binary(header->cputype,
	    header->cpusubtype & ~CPU_SUBTYPE_MASK, TRUE)) {
		return LOAD_BADARCH;
	}

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {
	case MH_EXECUTE:
		if (depth != 1) {
			return LOAD_FAILURE;
		}
#if CONFIG_EMBEDDED
		if (header->flags & MH_DYLDLINK) {
			/* Check properties of dynamic executables */
			if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
				return LOAD_FAILURE;
			}
			result->needs_dynlinker = TRUE;
		} else {
			/* Check properties of static executables (disallowed except for development) */
#if !(DEVELOPMENT || DEBUG)
			return LOAD_FAILURE;
#endif
		}
#endif /* CONFIG_EMBEDDED */
		break;
	case MH_DYLINKER:
		if (depth != 2) {
			return LOAD_FAILURE;
		}
		is_dyld = TRUE;
		break;

	default:
		return LOAD_FAILURE;
	}
	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/* ensure header + sizeofcmds falls within the file */
	if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
	    (off_t)cmds_size > macho_size ||
	    round_page_overflow(cmds_size, &alloc_size)) {
		return LOAD_BADMACHO;
	}

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = kalloc(alloc_size);
	if (addr == NULL) {
		return LOAD_NOSPACE;
	}

	error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset,
	    UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p);
	if (error) {
		kfree(addr, alloc_size);
		return LOAD_IOERROR;
	}

	if (resid) {
		/* We must be able to read in as much as the mach_header indicated */
		kfree(addr, alloc_size);
		return LOAD_BADMACHO;
	}
	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || is_dyld) {
		slide = aslr_offset;
	}

	/*
	 *  Scan through the commands, processing each one as necessary.
	 *  We parse in four passes through the headers:
	 *  0: determine if TEXT and DATA boundary can be page-aligned
	 *  1: thread state, uuid, code signature
	 *  2: segments
	 *  3: dyld, encryption, check entry point
	 */
	boolean_t slide_realign = FALSE;
#if __arm64__
	if (!abi64) {
		slide_realign = TRUE;
	}
#endif

	for (pass = 0; pass <= 3; pass++) {
		if (pass == 0 && !slide_realign && !is_dyld) {
			/* if we don't need to realign the slide or determine dyld's load
			 * address, pass 0 can be skipped */
			continue;
		} else if (pass == 1) {
#if __arm64__
			boolean_t is_pie;
			int64_t adjust;

			is_pie = ((header->flags & MH_PIE) != 0);
			if (pagezero_end != 0 &&
			    pagezero_end < effective_page_size) {
				/* need at least 1 page for PAGEZERO */
				adjust = effective_page_size;
				MACHO_PRINTF(("pagezero boundary at "
				    "0x%llx; adjust slide from "
				    "0x%llx to 0x%llx%s\n",
				    (uint64_t) pagezero_end,
				    (uint64_t) slide,
				    (uint64_t) (slide + adjust),
				    (is_pie
				    ? ""
				    : " BUT NO PIE ****** :-(")));
				if (is_pie) {
					slide += adjust;
					pagezero_end += adjust;
					executable_end += adjust;
					writable_start += adjust;
				}
			}
			if (pagezero_end != 0) {
				result->has_pagezero = TRUE;
			}
			if (executable_end == writable_start &&
			    (executable_end & effective_page_mask) != 0 &&
			    (executable_end & FOURK_PAGE_MASK) == 0) {
				/*
				 * The TEXT/DATA boundary is 4K-aligned but
				 * not page-aligned. Adjust the slide to make
				 * it page-aligned and avoid having a page
				 * with both write and execute permissions.
				 */
				adjust =
				    (effective_page_size -
				    (executable_end & effective_page_mask));
				MACHO_PRINTF(("page-unaligned X-W boundary at "
				    "0x%llx; adjust slide from "
				    "0x%llx to 0x%llx%s\n",
				    (uint64_t) executable_end,
				    (uint64_t) slide,
				    (uint64_t) (slide + adjust),
				    (is_pie
				    ? ""
				    : " BUT NO PIE ****** :-(")));
				if (is_pie) {
					slide += adjust;
				}
			}
#endif /* __arm64__ */

			if (dyld_no_load_addr && binresult) {
				/*
				 * The dyld Mach-O does not specify a load address. Try to locate
				 * it right after the main binary. If binresult == NULL, load
				 * directly to the given slide.
				 */
				slide = vm_map_round_page(slide + binresult->max_vm_addr, effective_page_mask);
			}
		}
		else if (pass == 3) {
			/*
			 * Check that the entry point is contained in an executable segment
			 */
			if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) {
				/* Driver binaries must have driverkit platform */
				if (result->ip_platform == PLATFORM_DRIVERKIT) {
					/* Driver binaries have no entry point */
					ret = setup_driver_main(thread, slide, result);
				} else {
					ret = LOAD_FAILURE;
				}
			} else if (!result->using_lcmain && result->validentry == 0) {
				ret = LOAD_FAILURE;
			}
			if (ret != KERN_SUCCESS) {
				thread_state_initialize(thread);
				break;
			}

			/*
			 * Check that some segment maps the start of the mach-o file, which is
			 * needed by the dynamic loader to read the mach headers, etc.
			 */
			if ((pass == 3) && (found_header_segment == FALSE)) {
				ret = LOAD_BADMACHO;
				break;
			}
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;
		while (ncmds--) {
			/* ensure enough space for a minimal load command */
			if (offset + sizeof(struct load_command) > cmds_size) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents. Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > cmds_size) {
				ret = LOAD_BADMACHO;
				break;
			}
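
			/*
			 * Illustrative failure mode the check above rejects:
			 * a crafted cmdsize such as 0xFFFFFFF8 would wrap a
			 * naive "offset += cmdsize" back to a low value, so
			 * later commands would be read from memory already
			 * parsed, or from outside the validated region;
			 * os_add_overflow() rejects the wrap outright.
			 */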
			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 * Note that each load command implementation is expected to validate
			 * that lcp->cmdsize is large enough to fit its specific struct type
			 * before dereferencing fields not covered by struct load_command.
			 */
			switch (lcp->cmd) {
			case LC_SEGMENT: {
				struct segment_command *scp = (struct segment_command *) lcp;
				if (scp->cmdsize < sizeof(*scp)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (pass == 0) {
					if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
						dyld_no_load_addr = TRUE;
						if (!slide_realign) {
							/* got what we need, bail early on pass 0 */
							continue;
						}
					}

#if __arm64__
					assert(!abi64);

					if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
						/* PAGEZERO */
						if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
					if (scp->initprot & VM_PROT_EXECUTE) {
						/* TEXT */
						if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
					if (scp->initprot & VM_PROT_WRITE) {
						/* DATA */
						if (os_add_overflow(scp->vmaddr, slide, &writable_start)) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
#endif /* __arm64__ */
					break;
				}

				if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
					found_xhdr = TRUE;
				}

				if (pass != 2) {
					break;
				}

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				    header->filetype,
				    control,
				    file_offset,
				    macho_size,
				    vp,
				    map,
				    slide,
				    result);
				if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
					/* Enforce a single segment mapping offset zero, with R+X
					 * protection. */
					if (found_header_segment ||
					    ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
						ret = LOAD_BADMACHO;
						break;
					}
					found_header_segment = TRUE;
				}

				break;
			}
			case LC_SEGMENT_64: {
				struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
				if (scp64->cmdsize < sizeof(*scp64)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (pass == 0) {
					if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
						dyld_no_load_addr = TRUE;
						if (!slide_realign) {
							/* got what we need, bail early on pass 0 */
							continue;
						}
					}
					break;
				}

				if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
					found_xhdr = TRUE;
				}

				if (pass != 2) {
					break;
				}

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				    header->filetype,
				    control,
				    file_offset,
				    macho_size,
				    vp,
				    map,
				    slide,
				    result);
				if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
					/* Enforce a single segment mapping offset zero, with R+X
					 * protection. */
					if (found_header_segment ||
					    ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
						ret = LOAD_BADMACHO;
						break;
					}
					found_header_segment = TRUE;
				}

				break;
			}
			case LC_UNIXTHREAD:
				if (pass != 1) {
					break;
				}
				ret = load_unixthread(
					(struct thread_command *) lcp,
					thread,
					slide,
					result);
				break;
			case LC_MAIN:
				if (pass != 1) {
					break;
				}
				if (depth != 1) {
					break;
				}
				ret = load_main(
					(struct entry_point_command *) lcp,
					thread,
					slide,
					result);
				break;
:
1076 if ((depth
== 1) && (dlp
== 0)) {
1077 dlp
= (struct dylinker_command
*)lcp
;
1078 dlarchbits
= (header
->cputype
& CPU_ARCH_MASK
);
1084 if (pass
== 1 && depth
== 1) {
1085 ret
= load_uuid((struct uuid_command
*) lcp
,
1086 (char *)addr
+ cmds_size
,
1090 case LC_CODE_SIGNATURE
:
1096 * load signatures & store in uip
1097 * set VM object "signed_pages"
1099 ret
= load_code_signature(
1100 (struct linkedit_data_command
*) lcp
,
1107 if (ret
!= LOAD_SUCCESS
) {
1108 printf("proc %d: load code signature error %d "
1109 "for file \"%s\"\n",
1110 p
->p_pid
, ret
, vp
->v_name
);
1112 * Allow injections to be ignored on devices w/o enforcement enabled
1114 if (!cs_process_global_enforcement()) {
1115 ret
= LOAD_SUCCESS
; /* ignore error */
1118 got_code_signatures
= TRUE
;
1121 if (got_code_signatures
) {
1122 unsigned tainted
= CS_VALIDATE_TAINTED
;
1123 boolean_t valid
= FALSE
;
1127 if (cs_debug
> 10) {
1128 printf("validating initial pages of %s\n", vp
->v_name
);
1131 while (off
< alloc_size
&& ret
== LOAD_SUCCESS
) {
1132 tainted
= CS_VALIDATE_TAINTED
;
1134 valid
= cs_validate_range(vp
,
1140 if (!valid
|| (tainted
& CS_VALIDATE_TAINTED
)) {
1142 printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
1143 vp
->v_name
, p
->p_pid
, (long long)(file_offset
+ off
), valid
, tainted
, result
->csflags
);
1145 if (cs_process_global_enforcement() ||
1146 (result
->csflags
& (CS_HARD
| CS_KILL
| CS_ENFORCEMENT
))) {
1149 result
->csflags
&= ~CS_VALID
;
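
			/*
			 * Note on the loop above: only the pages read into
			 * the kernel buffer (the header plus load commands)
			 * are validated here, page by page; the rest of the
			 * image is validated lazily when pages are faulted
			 * in through the pager.
			 */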
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3) {
					break;
				}
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp, file_offset,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					os_reason_t load_failure_reason = OS_REASON_NULL;
					printf("proc %d: set_code_unprotect() error %d "
					    "for file \"%s\"\n",
					    p->p_pid, ret, vp->v_name);
					/*
					 * Don't let the app run if it's
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);

						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
						load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
					} else {
						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
						load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
					}

					/*
					 * Don't signal the process if it was forked and in a partially constructed
					 * state as part of a spawn -- it will just be torn down when the exec fails.
					 */
					if (!spawn) {
						assert(load_failure_reason != OS_REASON_NULL);
						if (vfexec) {
							psignal_vfork_with_reason(p, get_threadtask(imgp->ip_new_thread), imgp->ip_new_thread, SIGKILL, load_failure_reason);
							load_failure_reason = OS_REASON_NULL;
						} else {
							psignal_with_reason(p, SIGKILL, load_failure_reason);
							load_failure_reason = OS_REASON_NULL;
						}
					} else {
						os_reason_free(load_failure_reason);
						load_failure_reason = OS_REASON_NULL;
					}
				}
				break;
#endif
			case LC_VERSION_MIN_IPHONEOS:
			case LC_VERSION_MIN_MACOSX:
			case LC_VERSION_MIN_WATCHOS:
			case LC_VERSION_MIN_TVOS: {
				struct version_min_command *vmc;

				if (depth != 1 || pass != 1) {
					break;
				}
				vmc = (struct version_min_command *) lcp;
				ret = load_version(vmc, &found_version_cmd, result);
				break;
			}
			case LC_BUILD_VERSION: {
				if (depth != 1 || pass != 1) {
					break;
				}
				struct build_version_command *bvc = (struct build_version_command *)lcp;
				if (bvc->cmdsize < sizeof(*bvc)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (found_version_cmd == TRUE) {
					ret = LOAD_BADMACHO;
					break;
				}
				result->ip_platform = bvc->platform;
				found_version_cmd = TRUE;
				break;
			}
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS) {
				break;
			}
		}
		if (ret != LOAD_SUCCESS) {
			break;
		}
	}

	if (ret == LOAD_SUCCESS) {
		if (!got_code_signatures && cs_process_global_enforcement()) {
			ret = LOAD_FAILURE;
		}

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

		if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
			/*
			 * load the dylinker, and slide it by the independent DYLD ASLR
			 * offset regardless of the PIE-ness of the main binary.
			 */
			ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
			    dyld_aslr_offset, result, imgp);
		}

		if ((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
#if CONFIG_ENFORCE_SIGNED_CODE
			if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
				ret = LOAD_FAILURE;
			}
#endif
		}
	}

	if (ret == LOAD_BADMACHO && found_xhdr) {
		ret = LOAD_BADMACHO_UPX;
	}

	kfree(addr, alloc_size);

	return ret;
}
static load_return_t
validate_potential_simulator_binary(
	cpu_type_t exectype __unused,
	struct image_params *imgp __unused,
	off_t file_offset __unused,
	off_t macho_size __unused)
{
#if __x86_64__
	/* Allow 32 bit exec only for simulator binaries */
	if (bootarg_no32exec && imgp != NULL && exectype == CPU_TYPE_X86) {
		if (imgp->ip_simulator_binary == IMGPF_SB_DEFAULT) {
			boolean_t simulator_binary = check_if_simulator_binary(imgp, file_offset, macho_size);
			imgp->ip_simulator_binary = simulator_binary ? IMGPF_SB_TRUE : IMGPF_SB_FALSE;
		}

		if (imgp->ip_simulator_binary != IMGPF_SB_TRUE) {
			return LOAD_BADARCH;
		}
	}
#endif
	return LOAD_SUCCESS;
}
#if __x86_64__
static boolean_t
check_if_simulator_binary(
	struct image_params	*imgp,
	off_t			file_offset,
	off_t			macho_size)
{
	struct mach_header	*header;
	char			*ip_vdata = NULL;
	kauth_cred_t		cred = NULL;
	uint32_t		ncmds;
	struct load_command	*lcp;
	boolean_t		simulator_binary = FALSE;
	void			*addr = NULL;
	vm_size_t		alloc_size, cmds_size;
	size_t			offset;
	proc_t			p = current_proc(); /* XXXX */
	int			error;
	int			resid = 0;
	size_t			mach_header_sz = sizeof(struct mach_header);

	cred = kauth_cred_proc_ref(p);

	/* Allocate page to copyin mach header */
	ip_vdata = kalloc(PAGE_SIZE);
	if (ip_vdata == NULL) {
		goto bad;
	}

	/* Read the Mach-O header */
	error = vn_rdwr(UIO_READ, imgp->ip_vp, ip_vdata,
	    PAGE_SIZE, file_offset,
	    UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
	    cred, &resid, p);
	if (error) {
		goto bad;
	}

	header = (struct mach_header *)ip_vdata;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/* ensure header + sizeofcmds falls within the file */
	if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
	    (off_t)cmds_size > macho_size ||
	    round_page_overflow(cmds_size, &alloc_size)) {
		goto bad;
	}

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = kalloc(alloc_size);
	if (addr == NULL) {
		goto bad;
	}

	error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, alloc_size, file_offset,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
	if (error) {
		goto bad;
	}

	if (resid) {
		/* We must be able to read in as much as the mach_header indicated */
		goto bad;
	}

	/*
	 * Loop through each of the load_commands indicated by the
	 * Mach-O header; if an absurd value is provided, we just
	 * run off the end of the reserved section by incrementing
	 * the offset too far, so we are implicitly fail-safe.
	 */
	offset = mach_header_sz;
	ncmds = header->ncmds;

	while (ncmds--) {
		/* ensure enough space for a minimal load command */
		if (offset + sizeof(struct load_command) > cmds_size) {
			break;
		}

		/*
		 *	Get a pointer to the command.
		 */
		lcp = (struct load_command *)(addr + offset);

		/*
		 * Perform prevalidation of the struct load_command
		 * before we attempt to use its contents. Invalid
		 * values are ones which result in an overflow, or
		 * which can not possibly be valid commands, or which
		 * straddle or exist past the reserved section at the
		 * start of the image.
		 */
		if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
		    lcp->cmdsize < sizeof(struct load_command) ||
		    offset > cmds_size) {
			break;
		}

		/* Check if it's a simulator binary. */
		switch (lcp->cmd) {
		case LC_VERSION_MIN_WATCHOS:
			simulator_binary = TRUE;
			break;

		case LC_BUILD_VERSION: {
			struct build_version_command *bvc;

			bvc = (struct build_version_command *) lcp;
			if (bvc->cmdsize < sizeof(*bvc)) {
				/* unsafe to use this command struct if cmdsize
				 * validated above is too small for it to fit */
				break;
			}
			if (bvc->platform == PLATFORM_IOSSIMULATOR ||
			    bvc->platform == PLATFORM_WATCHOSSIMULATOR) {
				simulator_binary = TRUE;
			}

			break;
		}

		case LC_VERSION_MIN_IPHONEOS: {
			simulator_binary = TRUE;
			break;
		}

		default:
			/* ignore other load commands */
			break;
		}

		if (simulator_binary == TRUE) {
			break;
		}
	}

bad:
	if (ip_vdata) {
		kfree(ip_vdata, PAGE_SIZE);
	}

	if (cred) {
		kauth_cred_unref(&cred);
	}

	if (addr) {
		kfree(addr, alloc_size);
	}

	return simulator_binary;
}
#endif /* __x86_64__ */
#if CONFIG_CODE_DECRYPTION

#define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
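
/*
 * 3 * 4096 leaves the first three 4KB pages of a protected slice in the
 * clear, which by this convention is enough to cover the mach header and
 * load commands so they can be parsed without the decrypter.
 */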
static load_return_t
unprotect_dsmos_segment(
	uint64_t	file_off,
	uint64_t	file_size,
	struct vnode	*vp,
	off_t		macho_offset,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		struct pager_crypt_info crypt_info;
		crypt_info.page_decrypt = dsmos_page_transform;
		crypt_info.crypt_ops = NULL;
		crypt_info.crypt_end = NULL;
#pragma unused(vp, macho_offset)
		crypt_info.crypt_ops = (void *)0x2e69cf40;
		vm_map_offset_t crypto_backing_offset;
		crypto_backing_offset = -1; /* i.e. use map entry's offset */
#if VM_MAP_DEBUG_APPLE_PROTECT
		if (vm_map_debug_apple_protect) {
			struct proc *p;
			p = current_proc();
			printf("APPLE_PROTECT: %d[%s] map %p "
			    "[0x%llx:0x%llx] %s(%s)\n",
			    p->p_pid, p->p_comm, map,
			    (uint64_t) map_addr,
			    (uint64_t) (map_addr + map_size),
			    __FUNCTION__, vp->v_name);
		}
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */

		/* The DSMOS pager can only be used by apple signed code */
		struct cs_blob *blob = csvnode_get_blob(vp, file_off);
		if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
			return LOAD_FAILURE;
		}

		kr = vm_map_apple_protected(map,
		    map_addr,
		    map_addr + map_size,
		    crypto_backing_offset,
		    &crypt_info);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else /* CONFIG_CODE_DECRYPTION */
static load_return_t
unprotect_dsmos_segment(
	__unused uint64_t	file_off,
	__unused uint64_t	file_size,
	__unused struct vnode	*vp,
	__unused off_t		macho_offset,
	__unused vm_map_t	map,
	__unused vm_map_offset_t map_addr,
	__unused vm_map_size_t	map_size)
{
	return LOAD_SUCCESS;
}
#endif /* CONFIG_CODE_DECRYPTION */
/*
 * map_segment:
 *	Maps a Mach-O segment, taking care of mis-alignment (wrt the system
 *	page size) issues.
 *
 *	The mapping might result in 1, 2 or 3 map entries:
 *	1. for the first page, which could overlap with the previous
 *	   mapping,
 *	2. for the center (if applicable),
 *	3. for the last page, which could overlap with the next mapping.
 *
 *	For each of those map entries, we might have to interpose a
 *	"fourk_pager" to deal with mis-alignment wrt the system page size,
 *	either in the mapping address and/or size or the file offset and/or
 *	size.
 *	The "fourk_pager" itself would be mapped with proper alignment
 *	wrt the system page size and would then be populated with the
 *	information about the intended mapping, with a "4KB" granularity.
 */
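/*
 * Illustrative case (hypothetical addresses): a 32-bit segment destined
 * for vm 0x5000..0xD000 on a 16KB-page system is not 16KB-aligned, so it
 * would be cut into a head piece up to the next 16KB boundary and a tail
 * piece backed by the fourk pager, with any fully 16KB-aligned middle
 * mapped directly.
 */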
static kern_return_t
map_segment(
	vm_map_t		map,
	vm_map_offset_t		vm_start,
	vm_map_offset_t		vm_end,
	memory_object_control_t	control,
	vm_map_offset_t		file_start,
	vm_map_offset_t		file_end,
	vm_prot_t		initprot,
	vm_prot_t		maxprot,
	load_result_t		*result)
{
	vm_map_offset_t	cur_offset, cur_start, cur_end;
	kern_return_t	ret;
	vm_map_offset_t	effective_page_mask;
	vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;

	if (vm_end < vm_start ||
	    file_end < file_start) {
		return LOAD_BADMACHO;
	}
	if (vm_end == vm_start ||
	    file_end == file_start) {
		/* nothing to map... */
		return LOAD_SUCCESS;
	}

	effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));

	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	if (vm_map_page_aligned(vm_start, effective_page_mask) &&
	    vm_map_page_aligned(vm_end, effective_page_mask) &&
	    vm_map_page_aligned(file_start, effective_page_mask) &&
	    vm_map_page_aligned(file_end, effective_page_mask)) {
		/* all page-aligned and map-aligned: proceed */
	} else {
#if __arm64__
		/* use an intermediate "4K" pager */
		vmk_flags.vmkf_fourk = TRUE;
#else /* __arm64__ */
		panic("map_segment: unexpected mis-alignment "
		    "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
		    (uint64_t) vm_start,
		    (uint64_t) vm_end,
		    (uint64_t) file_start,
		    (uint64_t) file_end);
#endif /* __arm64__ */
	}

	cur_offset = 0;
	cur_start = vm_start;
	cur_end = vm_start;
#if __arm64__
	if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
		/* one 4K pager for the 1st page */
		cur_end = vm_map_round_page(cur_start, effective_page_mask);
		if (cur_end > vm_end) {
			cur_end = vm_start + (file_end - file_start);
		}
		if (control != MEMORY_OBJECT_CONTROL_NULL) {
			/* no copy-on-read for mapped binaries */
			vmk_flags.vmkf_no_copy_on_read = 1;
			ret = vm_map_enter_mem_object_control(
				map,
				&cur_start,
				cur_end - cur_start,
				(mach_vm_offset_t)0,
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				control,
				file_start + cur_offset,
				TRUE, /* copy */
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		} else {
			ret = vm_map_enter_mem_object(
				map,
				&cur_start,
				cur_end - cur_start,
				(mach_vm_offset_t)0,
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				IPC_PORT_NULL,
				0, /* offset */
				TRUE, /* copy */
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		}
		if (ret != KERN_SUCCESS) {
			return LOAD_NOSPACE;
		}
		cur_offset += cur_end - cur_start;
	}
#endif /* __arm64__ */
	if (cur_end >= vm_start + (file_end - file_start)) {
		/* all mapped: done */
		goto done;
	}

	if (vm_map_round_page(cur_end, effective_page_mask) >=
	    vm_map_trunc_page(vm_start + (file_end - file_start),
	    effective_page_mask)) {
		/* no middle */
	} else {
		cur_start = cur_end;
		if ((vm_start & effective_page_mask) !=
		    (file_start & effective_page_mask)) {
			/* one 4K pager for the middle */
			cur_vmk_flags = vmk_flags;
		} else {
			/* regular mapping for the middle */
			cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		}

#if !CONFIG_EMBEDDED
		(void) result;
#else /* CONFIG_EMBEDDED */
		/*
		 * This process doesn't have its new csflags (from
		 * the image being loaded) yet, so tell VM to override the
		 * current process's CS_ENFORCEMENT for this mapping.
		 */
		if (result->csflags & CS_ENFORCEMENT) {
			cur_vmk_flags.vmkf_cs_enforcement = TRUE;
		} else {
			cur_vmk_flags.vmkf_cs_enforcement = FALSE;
		}
		cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
#endif /* CONFIG_EMBEDDED */

		cur_end = vm_map_trunc_page(vm_start + (file_end -
		    file_start),
		    effective_page_mask);
		if (control != MEMORY_OBJECT_CONTROL_NULL) {
			/* no copy-on-read for mapped binaries */
			cur_vmk_flags.vmkf_no_copy_on_read = 1;
			ret = vm_map_enter_mem_object_control(
				map,
				&cur_start,
				cur_end - cur_start,
				(mach_vm_offset_t)0,
				VM_FLAGS_FIXED,
				cur_vmk_flags,
				VM_KERN_MEMORY_NONE,
				control,
				file_start + cur_offset,
				TRUE, /* copy */
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		} else {
			ret = vm_map_enter_mem_object(
				map,
				&cur_start,
				cur_end - cur_start,
				(mach_vm_offset_t)0,
				VM_FLAGS_FIXED,
				cur_vmk_flags,
				VM_KERN_MEMORY_NONE,
				IPC_PORT_NULL,
				0, /* offset */
				TRUE, /* copy */
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		}
		if (ret != KERN_SUCCESS) {
			return LOAD_NOSPACE;
		}
		cur_offset += cur_end - cur_start;
	}
	if (cur_end >= vm_start + (file_end - file_start)) {
		/* all mapped: done */
		goto done;
	}
	cur_start = cur_end;
#if __arm64__
	if (!vm_map_page_aligned(vm_start + (file_end - file_start),
	    effective_page_mask)) {
		/* one 4K pager for the last page */
		cur_end = vm_start + (file_end - file_start);
		if (control != MEMORY_OBJECT_CONTROL_NULL) {
			/* no copy-on-read for mapped binaries */
			vmk_flags.vmkf_no_copy_on_read = 1;
			ret = vm_map_enter_mem_object_control(
				map,
				&cur_start,
				cur_end - cur_start,
				(mach_vm_offset_t)0,
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				control,
				file_start + cur_offset,
				TRUE, /* copy */
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		} else {
			ret = vm_map_enter_mem_object(
				map,
				&cur_start,
				cur_end - cur_start,
				(mach_vm_offset_t)0,
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				IPC_PORT_NULL,
				0, /* offset */
				TRUE, /* copy */
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		}
		if (ret != KERN_SUCCESS) {
			return LOAD_NOSPACE;
		}
		cur_offset += cur_end - cur_start;
	}
#endif /* __arm64__ */
done:
	assert(cur_end >= vm_start + (file_end - file_start));
	return LOAD_SUCCESS;
}
static
load_return_t
load_segment(
	struct load_command	*lcp,
	uint32_t		filetype,
	void			*control,
	off_t			pager_offset,
	off_t			macho_size,
	struct vnode		*vp,
	vm_map_t		map,
	int64_t			slide,
	load_result_t		*result)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t		ret;
	vm_map_size_t		delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;
	size_t			segment_command_size, total_section_size,
	    single_section_size;
	vm_map_offset_t		file_offset, file_size;
	vm_map_offset_t		vm_offset, vm_size;
	vm_map_offset_t		vm_start, vm_end, vm_end_aligned;
	vm_map_offset_t		file_start, file_end;
	kern_return_t		kr;
	boolean_t		verbose;
	vm_map_size_t		effective_page_size;
	vm_map_offset_t		effective_page_mask;
#if __arm64__
	vm_map_kernel_flags_t	vmk_flags;
	boolean_t		fourk_align;
#endif /* __arm64__ */

	effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
	effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));

	verbose = FALSE;
	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size = sizeof(struct section_64);
#if __arm64__
		/* 64-bit binary: should already be 16K-aligned */
		fourk_align = FALSE;
#endif /* __arm64__ */
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size = sizeof(struct section);
#if __arm64__
		/* 32-bit binary: might need 4K-alignment */
		if (effective_page_size != FOURK_PAGE_SIZE) {
			/* not using 4K page size: need fourk_pager */
			fourk_align = TRUE;
			verbose = TRUE;
		} else {
			/* using 4K page size: no need for re-alignment */
			fourk_align = FALSE;
		}
#endif /* __arm64__ */
	}
	if (lcp->cmdsize < segment_command_size) {
		return LOAD_BADMACHO;
	}
	total_section_size = lcp->cmdsize - segment_command_size;

	if (LC_SEGMENT_64 == lcp->cmd) {
		scp = (struct segment_command_64 *)lcp;
	} else {
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}

	if (verbose) {
		MACHO_PRINTF(("+++ load_segment %s "
		    "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
		    "prot %d/%d flags 0x%x\n",
		    scp->segname,
		    (uint64_t)(slide + scp->vmaddr),
		    (uint64_t)(slide + scp->vmaddr + scp->vmsize),
		    pager_offset + scp->fileoff,
		    pager_offset + scp->fileoff + scp->filesize,
		    scp->initprot,
		    scp->maxprot,
		    scp->flags));
	}
	/*
	 *	Make sure what we get from the file is really ours (as specified
	 *	by macho_size).
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size) {
		return LOAD_BADMACHO;
	}
	/*
	 *	Ensure that the number of sections specified would fit
	 *	within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects) {
		return LOAD_BADMACHO;
	}
	/*
	 *	Make sure the segment is page-aligned in the file.
	 */
	file_offset = pager_offset + scp->fileoff;      /* limited to 32 bits */
	file_size = scp->filesize;
#if __arm64__
	if (fourk_align) {
		if ((file_offset & FOURK_PAGE_MASK) != 0) {
			/*
			 * we can't mmap() it if it's not at least 4KB-aligned
			 * in the file
			 */
			return LOAD_BADMACHO;
		}
	} else
#endif /* __arm64__ */
	if ((file_offset & PAGE_MASK_64) != 0 ||
	    /* we can't mmap() it if it's not page-aligned in the file */
	    (file_offset & vm_map_page_mask(map)) != 0) {
		/*
		 * The 1st test would have failed if the system's page size
		 * was what this process believes is the page size, so let's
		 * fail here too for the sake of consistency.
		 */
		return LOAD_BADMACHO;
	}

	/*
	 * If we have a code signature attached for this slice
	 * require that the segments are within the signed part
	 * of the file.
	 */
	if (result->cs_end_offset &&
	    result->cs_end_offset < (off_t)scp->fileoff &&
	    result->cs_end_offset - scp->fileoff < scp->filesize) {
		if (cs_debug) {
			printf("section outside code signature\n");
		}
		return LOAD_BADMACHO;
	}

	if (os_add_overflow(scp->vmaddr, slide, &vm_offset)) {
		if (cs_debug) {
			printf("vmaddr too large\n");
		}
		return LOAD_BADMACHO;
	}

	vm_size = scp->vmsize;

	if (vm_size == 0) {
		return LOAD_SUCCESS;
	}
	if (scp->vmaddr == 0 &&
	    file_size == 0 &&
	    vm_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * For PIE, extend page zero rather than moving it. Extending
		 * page zero keeps early allocations from falling predictably
		 * between the end of page zero and the beginning of the first
		 * slid segment.
		 */
		/*
		 * This is a "page zero" segment: it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		vm_end = vm_offset + vm_size;
		if (vm_end < vm_offset) {
			return LOAD_BADMACHO;
		}

		if (verbose) {
			MACHO_PRINTF(("++++++ load_segment: "
			    "page_zero up to 0x%llx\n",
			    (uint64_t) vm_end));
		}
#if __arm64__
		if (fourk_align) {
			/* raise min_offset as much as page-alignment allows */
			vm_end_aligned = vm_map_trunc_page(vm_end,
			    effective_page_mask);
		} else
#endif /* __arm64__ */
		{
			vm_end = vm_map_round_page(vm_end,
			    PAGE_MASK_64);
			vm_end_aligned = vm_end;
		}
		ret = vm_map_raise_min_offset(map,
		    vm_end_aligned);
#if __arm64__
		if (ret == 0 &&
		    vm_end > vm_end_aligned) {
			/* use fourk_pager to map the rest of pagezero */
			assert(fourk_align);
			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
			vmk_flags.vmkf_fourk = TRUE;
			ret = vm_map_enter_mem_object(
				map,
				&vm_end_aligned,
				vm_end - vm_end_aligned,
				(mach_vm_offset_t) 0,   /* mask */
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				IPC_PORT_NULL,
				0,
				FALSE,  /* copy */
				(scp->initprot & VM_PROT_ALL),
				(scp->maxprot & VM_PROT_ALL),
				VM_INHERIT_DEFAULT);
		}
#endif /* __arm64__ */

		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	} else {
#if CONFIG_EMBEDDED
		/* not PAGEZERO: should not be mapped at address 0 */
		if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
			return LOAD_BADMACHO;
		}
#endif /* CONFIG_EMBEDDED */
	}
#if __arm64__
	if (fourk_align) {
		/* 4K-align */
		file_start = vm_map_trunc_page(file_offset,
		    FOURK_PAGE_MASK);
		file_end = vm_map_round_page(file_offset + file_size,
		    FOURK_PAGE_MASK);
		vm_start = vm_map_trunc_page(vm_offset,
		    FOURK_PAGE_MASK);
		vm_end = vm_map_round_page(vm_offset + vm_size,
		    FOURK_PAGE_MASK);
		if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
		    page_aligned(file_start) &&
		    vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
		    page_aligned(vm_start) &&
		    vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
			/* XXX last segment: ignore mis-aligned tail */
			file_end = vm_map_round_page(file_end,
			    effective_page_mask);
			vm_end = vm_map_round_page(vm_end,
			    effective_page_mask);
		}
	} else
#endif /* __arm64__ */
	{
		file_start = vm_map_trunc_page(file_offset,
		    effective_page_mask);
		file_end = vm_map_round_page(file_offset + file_size,
		    effective_page_mask);
		vm_start = vm_map_trunc_page(vm_offset,
		    effective_page_mask);
		vm_end = vm_map_round_page(vm_offset + vm_size,
		    effective_page_mask);
	}

	if (vm_start < result->min_vm_addr) {
		result->min_vm_addr = vm_start;
	}
	if (vm_end > result->max_vm_addr) {
		result->max_vm_addr = vm_end;
	}

	if (map == VM_MAP_NULL) {
		return LOAD_SUCCESS;
	}
	if (vm_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		if (verbose) {
			MACHO_PRINTF(("++++++ load_segment: "
			    "mapping at vm [0x%llx:0x%llx] of "
			    "file [0x%llx:0x%llx]\n",
			    (uint64_t) vm_start,
			    (uint64_t) vm_end,
			    (uint64_t) file_start,
			    (uint64_t) file_end));
		}
		ret = map_segment(map,
		    vm_start,
		    vm_end,
		    control,
		    file_start,
		    file_end,
		    initprot,
		    maxprot,
		    result);
		if (ret) {
			return LOAD_NOSPACE;
		}

#if FIXME
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
		if (delta_size > 0) {
			mach_vm_offset_t tmp;

			ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD);
			if (ret != KERN_SUCCESS) {
				return LOAD_RESOURCE;
			}

			if (copyout(tmp, map_addr + scp->filesize,
			    delta_size)) {
				(void) mach_vm_deallocate(
					kernel_map, tmp, delta_size);
				return LOAD_FAILURE;
			}

			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}
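
	/*
	 * Example of the leftover the (compiled-out) block above would
	 * zero: a segment with filesize 0x1800 on a 4KB-page system maps
	 * one full page plus 0x800 bytes; the remaining 0x800 bytes of the
	 * last page must read as zeroes rather than stale contents.
	 */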
	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	if ((vm_end - vm_start) > (file_end - file_start)) {
		delta_size = (vm_end - vm_start) - (file_end - file_start);
	} else {
		delta_size = 0;
	}
	if (delta_size > 0) {
		mach_vm_offset_t tmp;

		tmp = vm_start + (file_end - file_start);
		if (verbose) {
			MACHO_PRINTF(("++++++ load_segment: "
			    "delta mapping vm [0x%llx:0x%llx]\n",
			    (uint64_t) tmp,
			    (uint64_t) (tmp + delta_size)));
		}
		kr = map_segment(map,
		    tmp,
		    tmp + delta_size,
		    MEMORY_OBJECT_CONTROL_NULL,
		    0,
		    delta_size,
		    scp->initprot,
		    scp->maxprot,
		    result);
		if (kr != KERN_SUCCESS) {
			return LOAD_NOSPACE;
		}
	}

	if ((scp->fileoff == 0) && (scp->filesize != 0)) {
		result->mach_header = vm_offset;
	}

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_dsmos_segment(file_start,
		    file_end - file_start,
		    vp,
		    pager_offset,
		    map,
		    vm_start,
		    vm_end - vm_start);
		if (ret != LOAD_SUCCESS) {
			return ret;
		}
	} else {
		ret = LOAD_SUCCESS;
	}

	if (LOAD_SUCCESS == ret &&
	    filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd,
		    single_section_size,
		    ((const char *)lcp +
		    segment_command_size),
		    slide,
		    result);
	}

	if (result->entry_point != MACH_VM_MIN_ADDRESS) {
		if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
			if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) {
				result->validentry = 1;
			} else {
				/* right range but wrong protections, unset if previously validated */
				result->validentry = 0;
			}
		}
	}

	return ret;
}
static
load_return_t
load_uuid(
	struct uuid_command	*uulp,
	char			*command_end,
	load_result_t		*result)
{
	/*
	 * We need to check the following for this command:
	 * - The command size should be at least the size of struct uuid_command
	 * - The UUID part of the command should be completely within the mach-o header
	 */

	if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
	    (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
		return LOAD_BADMACHO;
	}

	memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
	return LOAD_SUCCESS;
}
static
load_return_t
load_version(
	struct version_min_command	*vmc,
	boolean_t			*found_version_cmd,
	load_result_t			*result)
{
	uint32_t platform = 0;
	uint32_t sdk;

	if (vmc->cmdsize < sizeof(*vmc)) {
		return LOAD_BADMACHO;
	}
	if (*found_version_cmd == TRUE) {
		return LOAD_BADMACHO;
	}
	*found_version_cmd = TRUE;
	sdk = vmc->sdk;
	switch (vmc->cmd) {
	case LC_VERSION_MIN_MACOSX:
		platform = PLATFORM_MACOS;
		break;
#if __x86_64__ /* __x86_64__ */
	case LC_VERSION_MIN_IPHONEOS:
		platform = PLATFORM_IOSSIMULATOR;
		break;
	case LC_VERSION_MIN_WATCHOS:
		platform = PLATFORM_WATCHOSSIMULATOR;
		break;
	case LC_VERSION_MIN_TVOS:
		platform = PLATFORM_TVOSSIMULATOR;
		break;
#else
	case LC_VERSION_MIN_IPHONEOS: {
#if __arm64__
		extern int legacy_footprint_entitlement_mode;
		if (vmc->sdk < (12 << 16)) {
			/* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
			result->legacy_footprint = TRUE;
		}
#endif /* __arm64__ */
		platform = PLATFORM_IOS;
		break;
	}
	case LC_VERSION_MIN_WATCHOS:
		platform = PLATFORM_WATCHOS;
		break;
	case LC_VERSION_MIN_TVOS:
		platform = PLATFORM_TVOS;
		break;
#endif /* __x86_64__ */
	/* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */
	default:
		sdk = (uint32_t)-1;
		__builtin_unreachable();
	}
	result->ip_platform = platform;
	result->lr_sdk = sdk;
	return LOAD_SUCCESS;
}
static
load_return_t
load_main(
	struct entry_point_command	*epc,
	thread_t			thread,
	int64_t				slide,
	load_result_t			*result)
{
	mach_vm_offset_t addr;
	kern_return_t	ret;

	if (epc->cmdsize < sizeof(*epc)) {
		return LOAD_BADMACHO;
	}
	if (result->thread_count != 0) {
		return LOAD_FAILURE;
	}

	if (thread == THREAD_NULL) {
		return LOAD_SUCCESS;
	}

	/*
	 * LC_MAIN specifies stack size but not location.
	 * Add guard page to allocation size (MAXSSIZ includes guard page).
	 */
	if (epc->stacksize) {
		if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) {
			/*
			 * We are going to immediately throw away this result, but we want
			 * to make sure we aren't loading a dangerously close to
			 * overflowing value, since this will have a guard page added to it
			 * and be rounded to page boundaries
			 */
			return LOAD_BADMACHO;
		}
		result->user_stack_size = epc->stacksize;
		if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
			return LOAD_BADMACHO;
		}
		result->custom_stack = TRUE;
	} else {
		result->user_stack_alloc_size = MAXSSIZ;
	}
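
	/*
	 * Sizing note: the first overflow check probes stacksize + 4 pages
	 * purely as a sanity margin and discards the sum; the values kept
	 * are stacksize for the stack proper and stacksize + one page for
	 * the allocation, the extra page being the guard page.
	 */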
	/* use default location for stack */
	ret = thread_userstackdefault(&addr, result->is_64bit_addr);
	if (ret != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}

	/* The stack slides down from the default location */
	result->user_stack = addr;
	result->user_stack -= slide;

	if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
		/* Already processed LC_MAIN or LC_UNIXTHREAD */
		return LOAD_FAILURE;
	}

	/* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
	result->needs_dynlinker = TRUE;
	result->using_lcmain = TRUE;

	ret = thread_state_initialize(thread);
	if (ret != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}

	result->unixproc = TRUE;
	result->thread_count++;

	return LOAD_SUCCESS;
}
static
load_return_t
setup_driver_main(
	thread_t	thread,
	int64_t		slide,
	load_result_t	*result)
{
	mach_vm_offset_t addr;
	kern_return_t	ret;

	/* Driver binaries have no LC_MAIN, use defaults */

	if (thread == THREAD_NULL) {
		return LOAD_SUCCESS;
	}

	result->user_stack_alloc_size = MAXSSIZ;

	/* use default location for stack */
	ret = thread_userstackdefault(&addr, result->is_64bit_addr);
	if (ret != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}

	/* The stack slides down from the default location */
	result->user_stack = addr;
	result->user_stack -= slide;

	if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
		/* Already processed LC_MAIN or LC_UNIXTHREAD */
		return LOAD_FAILURE;
	}

	result->needs_dynlinker = TRUE;

	ret = thread_state_initialize(thread);
	if (ret != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}

	result->unixproc = TRUE;
	result->thread_count++;

	return LOAD_SUCCESS;
}
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	int64_t			slide,
	load_result_t		*result)
{
	load_return_t	ret;
	int		customstack = 0;
	mach_vm_offset_t addr;

	if (tcp->cmdsize < sizeof(*tcp)) {
		return LOAD_BADMACHO;
	}
	if (result->thread_count != 0) {
		return LOAD_FAILURE;
	}

	if (thread == THREAD_NULL) {
		return LOAD_SUCCESS;
	}

	ret = load_threadstack(thread,
	    (uint32_t *)(((vm_offset_t)tcp) +
	    sizeof(struct thread_command)),
	    tcp->cmdsize - sizeof(struct thread_command),
	    &addr, &customstack, result);
	if (ret != LOAD_SUCCESS) {
		return ret;
	}

	/* LC_UNIXTHREAD optionally specifies stack size and location */

	if (customstack) {
		result->custom_stack = TRUE;
	} else {
		result->user_stack_alloc_size = MAXSSIZ;
	}

	/* The stack slides down from the default location */
	result->user_stack = addr;
	result->user_stack -= slide;

	ret = load_threadentry(thread,
	    (uint32_t *)(((vm_offset_t)tcp) +
	    sizeof(struct thread_command)),
	    tcp->cmdsize - sizeof(struct thread_command),
	    &addr);
	if (ret != LOAD_SUCCESS) {
		return ret;
	}

	if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
		/* Already processed LC_MAIN or LC_UNIXTHREAD */
		return LOAD_FAILURE;
	}

	result->entry_point = addr;
	result->entry_point += slide;

	ret = load_threadstate(thread,
	    (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
	    tcp->cmdsize - sizeof(struct thread_command),
	    result);
	if (ret != LOAD_SUCCESS) {
		return ret;
	}

	result->unixproc = TRUE;
	result->thread_count++;

	return LOAD_SUCCESS;
}
static
load_return_t
load_threadstate(
	thread_t	thread,
	uint32_t	*ts,
	uint32_t	total_size,
	load_result_t	*result)
{
	load_return_t	ret;
	uint32_t	size;
	int		flavor;
	uint32_t	thread_size;
	uint32_t	*local_ts = NULL;
	uint32_t	local_ts_size = 0;

	(void)thread;

	if (total_size > 0) {
		local_ts_size = total_size;
		local_ts = kalloc(local_ts_size);
		if (local_ts == NULL) {
			return LOAD_FAILURE;
		}
		memcpy(local_ts, ts, local_ts_size);
		ts = local_ts;
	}

	/*
	 * Validate the new thread state; iterate through the state flavors in
	 * the Mach-O file.
	 * XXX: we should validate the machine state here, to avoid failing at
	 * activation time where we can't bail out cleanly.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;

		if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
		    os_sub_overflow(total_size, thread_size, &total_size)) {
			ret = LOAD_BADMACHO;
			goto bad;
		}

		ts += size;     /* ts is a (uint32_t *) */
	}

	result->threadstate = local_ts;
	result->threadstate_sz = local_ts_size;
	return LOAD_SUCCESS;

bad:
	if (local_ts) {
		kfree(local_ts, local_ts_size);
	}
	return ret;
}
static
load_return_t
load_threadstack(
	thread_t		thread,
	uint32_t		*ts,
	uint32_t		total_size,
	mach_vm_offset_t	*user_stack,
	int			*customstack,
	load_result_t		*result
	)
{
	kern_return_t	ret;
	uint32_t	size;
	int		flavor;
	uint32_t	stack_size;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;

		if (UINT32_MAX - 2 < size ||
		    UINT32_MAX / sizeof(uint32_t) < size + 2) {
			return LOAD_BADMACHO;
		}
		stack_size = (size + 2) * sizeof(uint32_t);
		if (stack_size > total_size) {
			return LOAD_BADMACHO;
		}
		total_size -= stack_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_userstack() based on
		 * the value of flavor.
		 */
		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size,
		    user_stack, customstack, result->is_64bit_data);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		ts += size;	/* ts is a (uint32_t *) */
	}
	return LOAD_SUCCESS;
}
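/*
 * Illustrative sketch (an addition, not from the original source): the
 * "cast based on flavor" performed by thread_userstack() and
 * thread_entrypoint() amounts to per-flavor dispatch in the
 * machine-dependent thread code; simplified for x86_64 it would look
 * roughly like:
 *
 *	switch (flavor) {
 *	case x86_THREAD_STATE64: {
 *		x86_thread_state64_t *st = (x86_thread_state64_t *)ts;
 *		*user_stack = st->rsp;		// stack pointer register
 *		if (customstack) {
 *			*customstack = (st->rsp != 0);
 *		}
 *		break;
 *	}
 *	...
 *	}
 */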
static
load_return_t
load_threadentry(
	thread_t		thread,
	uint32_t		*ts,
	uint32_t		total_size,
	mach_vm_offset_t	*entry_point
	)
{
	kern_return_t	ret;
	uint32_t	size;
	int		flavor;
	uint32_t	entry_size;

	/*
	 * Set the thread state.
	 */
	*entry_point = MACH_VM_MIN_ADDRESS;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;

		if (UINT32_MAX - 2 < size ||
		    UINT32_MAX / sizeof(uint32_t) < size + 2) {
			return LOAD_BADMACHO;
		}
		entry_size = (size + 2) * sizeof(uint32_t);
		if (entry_size > total_size) {
			return LOAD_BADMACHO;
		}
		total_size -= entry_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_entrypoint() based on
		 * the value of flavor.
		 */
		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		ts += size;	/* ts is a (uint32_t *) */
	}
	return LOAD_SUCCESS;
}
struct macho_data {
	struct nameidata	__nid;
	union macho_vnode_header {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			__pad[512];
	} __header;
};

#define DEFAULT_DYLD_PATH "/usr/lib/dyld"

#if (DEVELOPMENT || DEBUG)
extern char dyld_alt_path[];
extern int use_alt_dyld;
#endif

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thread,
	int			depth,
	int64_t			slide,
	load_result_t		*result,
	struct image_params	*imgp
	)
{
	const char		*name;
	struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
	struct mach_header	*header;
	off_t			file_offset = 0; /* set by get_macho_vnode() */
	off_t			macho_size = 0;	/* set by get_macho_vnode() */
	load_result_t		*myresult;
	kern_return_t		ret;
	struct macho_data	*macho_data;
	struct {
		struct mach_header	__header;
		load_result_t		__myresult;
		struct macho_data	__macho_data;
	} *dyld_data;

	if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) {
		return LOAD_BADMACHO;
	}

	name = (const char *)lcp + lcp->name.offset;

	/* Check for a proper null terminated string. */
	size_t maxsz = lcp->cmdsize - lcp->name.offset;
	size_t namelen = strnlen(name, maxsz);
	if (namelen >= maxsz) {
		return LOAD_BADMACHO;
	}

#if (DEVELOPMENT || DEBUG)

	/*
	 * If an alternate dyld has been specified via boot args, check
	 * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
	 * executable and redirect the kernel to load that linker.
	 */

	if (use_alt_dyld) {
		int policy_error;
		uint32_t policy_flags = 0;
		int32_t policy_gencount = 0;

		policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
		if (policy_error == 0) {
			if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
				name = dyld_alt_path;
			}
		}
	}
#endif

#if !(DEVELOPMENT || DEBUG)
	if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
		return LOAD_BADMACHO;
	}
#endif

	/* Allocate wad-of-data from heap to reduce excessively deep stacks */

	MALLOC(dyld_data, void *, sizeof(*dyld_data), M_TEMP, M_WAITOK);
	header = &dyld_data->__header;
	myresult = &dyld_data->__myresult;
	macho_data = &dyld_data->__macho_data;

	ret = get_macho_vnode(name, archbits, header,
	    &file_offset, &macho_size, macho_data, &vp);
	if (ret) {
		goto novp_out;
	}

	*myresult = load_result_null;
	myresult->is_64bit_addr = result->is_64bit_addr;
	myresult->is_64bit_data = result->is_64bit_data;

	ret = parse_machfile(vp, map, thread, header, file_offset,
	    macho_size, depth, slide, 0, myresult, result, imgp);

	if (ret == LOAD_SUCCESS) {
		if (result->threadstate) {
			/* don't use the app's threadstate if we have a dyld */
			kfree(result->threadstate, result->threadstate_sz);
		}
		result->threadstate = myresult->threadstate;
		result->threadstate_sz = myresult->threadstate_sz;

		result->dynlinker = TRUE;
		result->entry_point = myresult->entry_point;
		result->validentry = myresult->validentry;
		result->all_image_info_addr = myresult->all_image_info_addr;
		result->all_image_info_size = myresult->all_image_info_size;
		if (myresult->platform_binary) {
			result->csflags |= CS_DYLD_PLATFORM;
		}
	}

	struct vnode_attr va;
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid64);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	int error = vnode_getattr(vp, &va, imgp->ip_vfs_context);
	if (error == 0) {
		imgp->ip_dyld_fsid = vnode_get_va_fsid(&va);
		imgp->ip_dyld_fsobjid = va.va_fileid;
	} else {
		imgp->ip_dyld_fsid = 0;
		imgp->ip_dyld_fsobjid = 0;
	}

	vnode_put(vp);
novp_out:
	FREE(dyld_data, M_TEMP);
	return ret;
}
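/*
 * Illustrative sketch (an addition, not from the original source): the
 * LC_LOAD_DYLINKER command that load_dylinker() parses embeds its path
 * as an lc_str whose offset is relative to the start of the command:
 *
 *	struct dylinker_command {
 *		uint32_t	cmd;		// LC_LOAD_DYLINKER
 *		uint32_t	cmdsize;	// includes the path bytes
 *		union lc_str	name;		// name.offset from command start
 *	};
 *
 * so the validation above reduces to: the path must begin inside the
 * command (name.offset < cmdsize) and must be NUL-terminated before the
 * command ends, which is what the strnlen() bound enforces.
 */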
static load_return_t
load_code_signature(
	struct linkedit_data_command	*lcp,
	struct vnode			*vp,
	off_t				macho_offset,
	off_t				macho_size,
	cpu_type_t			cputype,
	load_result_t			*result,
	struct image_params		*imgp)
{
	load_return_t	ret;
	kern_return_t	kr;
	vm_offset_t	addr;
	int		resid;
	struct cs_blob	*blob;
	int		error;
	vm_size_t	blob_size;
	uint32_t	sum;

	addr = 0;
	blob = NULL;

	if (lcp->cmdsize != sizeof(struct linkedit_data_command)) {
		ret = LOAD_BADMACHO;
		goto out;
	}

	sum = 0;
	if (os_add_overflow(lcp->dataoff, lcp->datasize, &sum) || sum > macho_size) {
		ret = LOAD_BADMACHO;
		goto out;
	}

	blob = ubc_cs_blob_get(vp, cputype, macho_offset);

	if (blob != NULL) {
		/* we already have a blob for this vnode and cputype */
		if (blob->csb_cpu_type != cputype ||
		    blob->csb_base_offset != macho_offset) {
			/* the blob has changed for this vnode: fail ! */
			ret = LOAD_BADMACHO;
			goto out;
		}

		/* It matches the blob we want here, let's verify the version */
		if (ubc_cs_generation_check(vp) == 0) {
			/* No need to revalidate, we're good! */
			ret = LOAD_SUCCESS;
			goto out;
		}

		/* That blob may be stale, let's revalidate. */
		error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
		if (error == 0) {
			/* Revalidation succeeded, we're good! */
			ret = LOAD_SUCCESS;
			goto out;
		}

		if (error != EAGAIN) {
			printf("load_code_signature: revalidation failed: %d\n", error);
			ret = LOAD_FAILURE;
			goto out;
		}

		assert(error == EAGAIN);

		/*
		 * Revalidation was not possible for this blob. We just continue as if there was no blob,
		 * rereading the signature, and ubc_cs_blob_add will do the right thing.
		 */
		blob = NULL;
	}

	blob_size = lcp->datasize;
	kr = ubc_cs_blob_allocate(&addr, &blob_size);
	if (kr != KERN_SUCCESS) {
		ret = LOAD_NOSPACE;
		goto out;
	}

	resid = 0;
	error = vn_rdwr(UIO_READ,
	    vp,
	    (caddr_t)addr,
	    lcp->datasize,
	    macho_offset + lcp->dataoff,
	    UIO_SYSSPACE,
	    0,
	    kauth_cred_get(),
	    &resid,
	    current_proc());
	if (error || resid != 0) {
		ret = LOAD_IOERROR;
		goto out;
	}

	if (ubc_cs_blob_add(vp,
	    cputype,
	    macho_offset,
	    &addr,
	    lcp->datasize,
	    imgp,
	    0,
	    &blob)) {
		if (addr) {
			ubc_cs_blob_deallocate(addr, blob_size);
		}
		ret = LOAD_FAILURE;
		goto out;
	} else {
		/* ubc_cs_blob_add() has consumed "addr" */
		addr = 0;
	}

#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_allocate( vp );
#endif

	ret = LOAD_SUCCESS;
out:
	if (ret == LOAD_SUCCESS) {
		if (blob == NULL) {
			panic("success, but no blob!");
		}

		result->csflags |= blob->csb_flags;
		result->platform_binary = blob->csb_platform_binary;
		result->cs_end_offset = blob->csb_end_offset;
	}
	if (addr != 0) {
		ubc_cs_blob_deallocate(addr, blob_size);
	}

	return ret;
}
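/*
 * Illustrative sketch (an addition, not from the original source): the
 * LC_CODE_SIGNATURE command handled above is a linkedit_data_command:
 *
 *	struct linkedit_data_command {
 *		uint32_t	cmd;		// LC_CODE_SIGNATURE
 *		uint32_t	cmdsize;	// sizeof(struct linkedit_data_command)
 *		uint32_t	dataoff;	// file offset of the blob
 *		uint32_t	datasize;	// size of the blob in bytes
 *	};
 *
 * so the os_add_overflow() check above verifies, without risk of 32-bit
 * wraparound, that the blob [dataoff, dataoff + datasize) lies entirely
 * within the macho_size bytes of this slice of the file.
 */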
#if CONFIG_CODE_DECRYPTION

static load_return_t
set_code_unprotect(
	struct encryption_info_command	*eip,
	caddr_t				addr,
	vm_map_t			map,
	int64_t				slide,
	struct vnode			*vp,
	off_t				macho_offset,
	cpu_type_t			cputype,
	cpu_subtype_t			cpusubtype)
{
	int		error, len;
	pager_crypt_info_t crypt_info;
	const char	*cryptname = 0;
	char		*vpath;
	kern_return_t	kr;

	size_t		offset;
	struct segment_command_64	*seg64;
	struct segment_command		*seg32;
	vm_map_offset_t	map_offset, map_size;
	vm_object_offset_t crypto_backing_offset;

	if (eip->cmdsize < sizeof(*eip)) {
		return LOAD_BADMACHO;
	}

	switch (eip->cryptid) {
	case 0:
		/* not encrypted, just an empty load command */
		return LOAD_SUCCESS;
	case 1:
		cryptname = "com.apple.unfree";
		break;
	case 0x10:
		/* some random cryptid that you could manually put into
		 * your binary if you want NULL */
		cryptname = "com.apple.null";
		break;
	default:
		return LOAD_BADMACHO;
	}

	if (map == VM_MAP_NULL) {
		return LOAD_SUCCESS;
	}
	if (NULL == text_crypter_create) {
		return LOAD_FAILURE;
	}

	MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (vpath == NULL) {
		return LOAD_FAILURE;
	}

	len = MAXPATHLEN;
	error = vn_getpath(vp, vpath, &len);
	if (error) {
		FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
		return LOAD_FAILURE;
	}

	/* set up decrypter first */
	crypt_file_data_t crypt_data = {
		.filename = vpath,
		.cputype = cputype,
		.cpusubtype = cpusubtype
	};
	kr = text_crypter_create(&crypt_info, cryptname, (void *)&crypt_data);
#if VM_MAP_DEBUG_APPLE_PROTECT
	if (vm_map_debug_apple_protect) {
		struct proc *p;
		p = current_proc();
		printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
		    p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr);
	}
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
	FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);

	if (kr) {
		printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
		    cryptname, kr);
		if (kr == kIOReturnNotPrivileged) {
			/* text encryption returned decryption failure */
			return LOAD_DECRYPTFAIL;
		}
		return LOAD_RESOURCE;
	}

	/* this is terrible, but we have to rescan the load commands to find the
	 * virtual address of this encrypted stuff. This code is gonna look like
	 * the dyld source one day... */
	struct mach_header *header = (struct mach_header *)addr;
	size_t mach_header_sz = sizeof(struct mach_header);
	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}
	offset = mach_header_sz;
	uint32_t ncmds = header->ncmds;
	while (ncmds--) {
		/*
		 * Get a pointer to the command.
		 */
		struct load_command *lcp = (struct load_command *)(addr + offset);
		offset += lcp->cmdsize;

		switch (lcp->cmd) {
		case LC_SEGMENT_64:
			seg64 = (struct segment_command_64 *)lcp;
			if ((seg64->fileoff <= eip->cryptoff) &&
			    (seg64->fileoff + seg64->filesize >=
			    eip->cryptoff + eip->cryptsize)) {
				map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
				map_size = eip->cryptsize;
				crypto_backing_offset = macho_offset + eip->cryptoff;
				goto remap_now;
			}
			break;
		case LC_SEGMENT:
			seg32 = (struct segment_command *)lcp;
			if ((seg32->fileoff <= eip->cryptoff) &&
			    (seg32->fileoff + seg32->filesize >=
			    eip->cryptoff + eip->cryptsize)) {
				map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
				map_size = eip->cryptsize;
				crypto_backing_offset = macho_offset + eip->cryptoff;
				goto remap_now;
			}
			break;
		}
	}

	/* if we get here, did not find anything */
	return LOAD_BADMACHO;

remap_now:
	/* now remap using the decrypter */
	MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
	    (uint64_t) map_offset,
	    (uint64_t) (map_offset + map_size)));
	kr = vm_map_apple_protected(map,
	    map_offset,
	    map_offset + map_size,
	    crypto_backing_offset,
	    &crypt_info);
	if (kr) {
		printf("set_code_unprotect(): mapping failed with %x\n", kr);
		return LOAD_PROTECT;
	}

	return LOAD_SUCCESS;
}

#endif /* CONFIG_CODE_DECRYPTION */
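/*
 * Worked example (an addition, not from the original source) of the
 * map_offset arithmetic in set_code_unprotect(): for a hypothetical
 * segment with fileoff 0x4000 and vmaddr 0x100004000, an encrypted range
 * starting at cryptoff 0x5000, and a slide of 0x8000, the encrypted
 * bytes land in the process at
 *
 *	map_offset = 0x100004000 + 0x5000 - 0x4000 + 0x8000 = 0x10000d000
 *
 * i.e. the file-relative offset of the encrypted range is rebased into
 * the slid virtual address of the segment that contains it.
 */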
/*
 * This routine exists to support the load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
	const char		*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct macho_data	*data,
	struct vnode		**vpp
	)
{
	struct vnode		*vp;
	vfs_context_t		ctx = vfs_context_current();
	proc_t			p = vfs_context_proc(ctx);
	kauth_cred_t		kerncred;
	struct nameidata	*ndp = &data->__nid;
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error;
	int			resid;
	union macho_vnode_header *header = &data->__header;
	off_t			fsize = (off_t)0;

	/*
	 * Capture the kernel credential for use in the actual read of the
	 * file, since the user doing the execution may have execute rights
	 * but not read rights, but to exec something, we have to either map
	 * or read it into the new process address space, which requires
	 * read rights.  This is to deal with lack of common credential
	 * serialization code which would treat NOCRED as "serialize 'root'".
	 */
	kerncred = vfs_context_ucred(vfs_context_kernel());

	/* init the namei data to point at the user's program file */
	NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);

	if ((error = namei(ndp)) != 0) {
		if (error == ENOENT) {
			error = LOAD_ENOENT;
		} else {
			error = LOAD_FAILURE;
		}
		return error;
	}
	nameidone(ndp);
	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	if (resid) {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (header->mach_header.magic == MH_MAGIC ||
	    header->mach_header.magic == MH_MAGIC_64) {
		is_fat = FALSE;
	} else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
		is_fat = TRUE;
	} else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
		    sizeof(*header));
		if (error != LOAD_SUCCESS) {
			goto bad2;
		}

		/* Look up our architecture in the fat file. */
		error = fatfile_getarch_with_bits(archbits,
		    (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch);
		if (error != LOAD_SUCCESS) {
			goto bad2;
		}

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
		    sizeof(header->mach_header), fat_arch.offset,
		    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		if (resid) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header->mach_header.magic != MH_MAGIC &&
		    header->mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
			error = LOAD_BADARCH;
			goto bad2;
		}

		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header->mach_header;
	*vpp = vp;

	ubc_setsize(vp, fsize);
	return error;

bad2:
	(void) VNOP_CLOSE(vp, FREAD, ctx);