/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>	/* vm_allocate() */
#include <mach/mach_vm.h>	/* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>
#include <machine/exec.h>
#include <machine/pal_routines.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/page_decrypt.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_protos.h>
/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t	pmap_create(vm_map_size_t size, boolean_t is_64bit);
extern void	pmap_switch(pmap_t);
/*
 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern kern_return_t	thread_setstatus(thread_t thread, int flavor,
				thread_state_t tstate,
				mach_msg_type_number_t count);

extern kern_return_t	thread_state_initialize(thread_t thread);
/* XXX should have prototypes in a shared header file */
extern int	get_map_nentries(vm_map_t);

extern kern_return_t	memory_object_signed(memory_object_control_t control,
					     boolean_t is_signed);
/* An empty load_result_t */
static load_result_t load_result_null = {
	.mach_header = MACH_VM_MIN_ADDRESS,
	.entry_point = MACH_VM_MIN_ADDRESS,
	.user_stack = MACH_VM_MIN_ADDRESS,
	.all_image_info_addr = MACH_VM_MIN_ADDRESS,
	.all_image_info_size = 0,
	.min_vm_addr = MACH_VM_MAX_ADDRESS,
	.max_vm_addr = MACH_VM_MIN_ADDRESS
};
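/*
 * Note the deliberately inverted bounds above: min_vm_addr starts at
 * MACH_VM_MAX_ADDRESS and max_vm_addr at MACH_VM_MIN_ADDRESS, so the first
 * segment processed by load_segment() always narrows them.  Fields not
 * named in the initializer are zeroed by C designated-initializer rules.
 */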
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	load_result_t		*result
);

static load_return_t
load_segment(
	struct load_command	*lcp,
	uint32_t		filetype,
	void			*control,
	off_t			pager_offset,
	off_t			macho_size,
	struct vnode		*vp,
	vm_map_t		map,
	int64_t			slide,
	load_result_t		*result
);

static load_return_t
load_code_signature(
	struct linkedit_data_command	*lcp,
	struct vnode			*vp,
	off_t				macho_offset,
	off_t				macho_size,
	cpu_type_t			cputype,
	load_result_t			*result);

#if CONFIG_CODE_DECRYPTION
static load_return_t
set_code_unprotect(
	struct encryption_info_command	*lcp,
	caddr_t				addr,
	vm_map_t			map,
	struct vnode			*vp);
#endif

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	int64_t			slide,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t		thread,
	uint32_t		*ts,
	uint32_t		total_size
);

static load_return_t
load_threadstack(
	thread_t		thread,
	uint32_t		*ts,
	uint32_t		total_size,
	mach_vm_offset_t	*user_stack,
	int			*customstack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	uint32_t		*ts,
	uint32_t		total_size,
	mach_vm_offset_t	*entry_point
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thread,
	int			depth,
	int64_t			slide,
	load_result_t		*result
);

struct macho_data;

static load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct macho_data	*macho_data,
	struct vnode		**vpp
);
static void
widen_segment_command(const struct segment_command *scp32,
    struct segment_command_64 *scp)
{
	scp->cmd = scp32->cmd;
	scp->cmdsize = scp32->cmdsize;
	bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
	scp->vmaddr = scp32->vmaddr;
	scp->vmsize = scp32->vmsize;
	scp->fileoff = scp32->fileoff;
	scp->filesize = scp32->filesize;
	scp->maxprot = scp32->maxprot;
	scp->initprot = scp32->initprot;
	scp->nsects = scp32->nsects;
	scp->flags = scp32->flags;
}
static void
note_all_image_info_section(const struct segment_command_64 *scp,
    boolean_t is64, size_t section_size, const void *sections,
    int64_t slide, load_result_t *result)
{
	const union {
		struct section s32;
		struct section_64 s64;
	} *sectionp;
	unsigned int i;

	if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
		return;
	for (i = 0; i < scp->nsects; ++i) {
		sectionp = (const void *)
		    ((const char *)sections + section_size * i);
		if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
		    sizeof(sectionp->s64.sectname))) {
			result->all_image_info_addr =
			    is64 ? sectionp->s64.addr : sectionp->s32.addr;
			result->all_image_info_addr += slide;
			result->all_image_info_size =
			    is64 ? sectionp->s64.size : sectionp->s32.size;
			return;
		}
	}
}
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thread,
	vm_map_t		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;
	vm_map_t		map;
	vm_map_t		old_map;
	pmap_t			pmap = 0;	/* protected by create_map */
	kern_return_t		kret;
	load_return_t		lret;
	task_t			old_task = TASK_NULL; /* protected by create_map */
	load_result_t		myresult;
	boolean_t		create_map = FALSE;
	int			spawn = (imgp->ip_flags & IMGPF_SPAWN);
	task_t			task = current_task();
	mach_vm_offset_t	aslr_offset = 0;

	if (new_map == VM_MAP_NULL) {
		create_map = TRUE;
		old_task = current_task();
	}

	/*
	 * If we are spawning, we have created backing objects for the process
	 * already, which include non-lazily creating the task map.  So we
	 * are going to switch out the task map with one appropriate for the
	 * bitness of the image being loaded.
	 */
	if (spawn) {
		create_map = TRUE;
		old_task = get_threadtask(thread);
	}

	if (create_map) {
		pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
		pal_switch_pmap(thread, pmap, imgp->ip_flags & IMGPF_IS_64BIT);
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;

#ifndef	CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which makes it
	 * possible to circumvent Code Signing Enforcement */
	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
		vm_map_disable_NX(map);
#endif

	/* Forcibly disallow execution from data pages even if the arch
	 * normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
		vm_map_disallow_data_exec(map);

	/*
	 * Compute a random offset for ASLR.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		aslr_offset = random();
		aslr_offset %= 1 << ((imgp->ip_flags & IMGPF_IS_64BIT) ? 16 : 8);
		aslr_offset <<= PAGE_SHIFT;
	}

	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
			      0, (int64_t)aslr_offset, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */
	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map))
		vm_map_set_4GB_pagezero(map);

	/*
	 *	Commit to new map.
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and it makes no sense.
		 */
		if (!spawn) {
			/*
			 * Mark the task as halting and start the other
			 * threads towards terminating themselves.  Then
			 * make sure any threads waiting for a process
			 * transition get informed that we are committed to
			 * this transition, and then finally complete the
			 * task halting (wait for threads and then cleanup
			 * of task resources).
			 */
			kret = task_start_halt(task);
			if (kret != KERN_SUCCESS) {
				return(LOAD_FAILURE);
			}
			proc_transcommit(current_proc(), 0);
			task_complete_halt(task);
			workqueue_exit(current_proc());
		}
		old_map = swap_task_map(old_task, thread, map, !spawn);
		vm_map_clear_4GB_pagezero(old_map);
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
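/*
 * For scale (illustrative, assuming 4KB pages): the ASLR computation above
 * keeps 16 random bits for a 64-bit image and 8 for a 32-bit one, then
 * shifts by PAGE_SHIFT, yielding a page-aligned slide in [0, 256MB) or
 * [0, 1MB) respectively.
 */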
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands. If "map" == VM_MAP_NULL or
 * "thread" == THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	struct uuid_command	*uulp = 0;
	integer_t		dlarchbits = 0;
	void			*control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void			*kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	proc_t			p = current_proc();		/* XXXX */
	int			error;
	int			resid = 0;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if this is the right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype,
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			return (LOAD_FAILURE);
		}
		break;

	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 3; pass++) {
		/*
		 * Check that the entry point is contained in an executable
		 * segment.
		 */
		if ((pass == 3) && (result->validentry == 0)) {
			thread_state_initialize(thread);
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
			case LC_SEGMENT_64:
				if (pass != 1)
					break;
				ret = load_segment(lcp,
						   header->filetype,
						   control,
						   file_offset,
						   macho_size,
						   vp,
						   map,
						   slide,
						   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
						(struct thread_command *) lcp,
						thread,
						slide,
						result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					uulp = (struct uuid_command *)lcp;
					memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					(depth == 1) ? result : NULL);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, vp);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* Don't let the app run if it's
					 * encrypted but we failed to set up the
					 * decrypter */
					psignal(p, SIGKILL);
				}
				break;
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}

	if (ret == LOAD_SUCCESS) {
		if (! got_code_signatures) {
			struct cs_blob *blob;
			/* no embedded signatures: look for detached ones */
			blob = ubc_cs_blob_get(vp, -1, file_offset);
			if (blob != NULL) {
				/* get flags to be applied to the process */
				result->csflags |= blob->csb_flags;
			}
		}

		if (dlp != 0) {
			/* load the dylinker, and always slide it by the ASLR
			 * offset regardless of PIE */
			ret = load_dylinker(dlp, dlarchbits, map, thread, depth, aslr_offset, result);
		}

		if ((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
		}
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	return(ret);
}
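/*
 * For reference, the layout parse_machfile() walks looks like this (a
 * sketch, not kernel code):
 *
 *	struct mach_header[_64]		<- addr (file_offset in the file)
 *	struct load_command #0		<- addr + mach_header_sz
 *	struct load_command #1		<- previous command + its cmdsize
 *	...				   (ncmds commands, sizeofcmds bytes
 *	struct load_command #ncmds-1	    in total)
 *
 * Every command begins with { uint32_t cmd; uint32_t cmdsize; }, which is
 * why the prevalidation above only trusts cmdsize after checking it for
 * overflow and for straddling the reserved region.
 */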
#if CONFIG_CODE_DECRYPTION

#define	APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)

static load_return_t
unprotect_segment(
	uint64_t	file_off,
	uint64_t	file_size,
	struct vnode	*vp,
	off_t		macho_offset,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t	kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t	delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		struct pager_crypt_info crypt_info;
		crypt_info.page_decrypt = dsmos_page_transform;
		crypt_info.crypt_ops = NULL;
		crypt_info.crypt_end = NULL;
#pragma unused(vp, macho_offset)
		crypt_info.crypt_ops = (void *)0x2e69cf40;
		kr = vm_map_apple_protected(map,
					    map_addr,
					    map_addr + map_size,
					    &crypt_info);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else	/* CONFIG_CODE_DECRYPTION */
static load_return_t
unprotect_segment(
	__unused	uint64_t	file_off,
	__unused	uint64_t	file_size,
	__unused	struct vnode	*vp,
	__unused	off_t		macho_offset,
	__unused	vm_map_t	map,
	__unused	vm_map_offset_t	map_addr,
	__unused	vm_map_size_t	map_size)
{
	return LOAD_SUCCESS;
}
#endif	/* CONFIG_CODE_DECRYPTION */
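/*
 * Worked example (illustrative, assuming 4KB pages): with
 * APPLE_UNPROTECTED_HEADER_SIZE = 0x3000, a mapping with file_off = 0x1000
 * and file_size = 0x4000 straddles the boundary, so delta = 0x3000 - 0x1000
 * = 0x2000: the first 0x2000 bytes of the mapping stay as-is and only the
 * file range [0x3000, 0x5000) is run through the transform.
 */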
static
load_return_t
load_segment(
	struct load_command	*lcp,
	uint32_t		filetype,
	void *			control,
	off_t			pager_offset,
	off_t			macho_size,
	struct vnode		*vp,
	vm_map_t		map,
	int64_t			slide,
	load_result_t		*result
)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t		ret;
	mach_vm_offset_t	map_addr, map_offset;
	mach_vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;
	size_t			segment_command_size, total_section_size,
				single_section_size;

	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size = sizeof(struct section_64);
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size = sizeof(struct section);
	}
	if (lcp->cmdsize < segment_command_size)
		return (LOAD_BADMACHO);
	total_section_size = lcp->cmdsize - segment_command_size;

	if (LC_SEGMENT_64 == lcp->cmd)
		scp = (struct segment_command_64 *)lcp;
	else {
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Ensure that the number of sections specified would fit
	 * within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects)
		return (LOAD_BADMACHO);
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if ((scp->fileoff & PAGE_MASK_64) != 0)
		return (LOAD_BADMACHO);

	/*
	 *	Round sizes to page size.
	 */
	seg_size = round_page_64(scp->vmsize);
	map_size = round_page_64(scp->filesize);
	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
	if (seg_size == 0)
		return (KERN_SUCCESS);
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * For PIE, extend page zero rather than moving it.  Extending
		 * page zero keeps early allocations from falling predictably
		 * between the end of page zero and the beginning of the first
		 * slid segment.
		 */
		seg_size += slide;
		slide = 0;

		/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
		if (scp->cmd == LC_SEGMENT_64) {
			/*
			 * This is a "page zero" segment:  it starts at address 0,
			 * is not mapped from the binary file and is not accessible.
			 * User-space should never be able to access that memory, so
			 * make it completely off limits by raising the VM map's
			 * minimum offset.
			 */
			ret = vm_map_raise_min_offset(map, seg_size);
			if (ret != KERN_SUCCESS) {
				return (LOAD_FAILURE);
			}
			return (LOAD_SUCCESS);
		}
	}

	/* If a non-zero slide was specified by the caller, apply now */
	map_addr += slide;

	if (map_addr < result->min_vm_addr)
		result->min_vm_addr = map_addr;
	if (map_addr+seg_size > result->max_vm_addr)
		result->max_vm_addr = map_addr+seg_size;

	if (map == VM_MAP_NULL)
		return (LOAD_SUCCESS);

	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map_enter_mem_object_control(map,
				&map_addr, map_size, (mach_vm_offset_t)0,
				VM_FLAGS_FIXED, control, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);

		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;

			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
					delta_size)) {
				(void) mach_vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
		}
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp->initprot, scp->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment(scp->fileoff,
					scp->filesize,
					vp,
					pager_offset,
					map,
					map_addr,
					map_size);
	} else {
		ret = LOAD_SUCCESS;
	}
	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
		    (const char *)lcp + segment_command_size, slide, result);

	if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size)))
		result->validentry = 1;

	return ret;
}
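/*
 * Zero-fill example (illustrative): a segment with filesize 0x1800 and
 * vmsize 0x3000 maps map_size = round_page(0x1800) = 0x2000 from the file,
 * zeroes the 0x800-byte tail of that last page via copyout(), then covers
 * the remaining delta_size = 0x3000 - 0x2000 = 0x1000 with anonymous
 * zero-fill memory from mach_vm_map().
 */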
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	int64_t			slide,
	load_result_t		*result
)
{
	load_return_t	ret;
	int		customstack = 0;

	if (tcp->cmdsize < sizeof(*tcp))
		return (LOAD_BADMACHO);
	if (result->thread_count != 0) {
		printf("load_unixthread: already have a thread!");
		return (LOAD_FAILURE);
	}

	if (thread == THREAD_NULL)
		return (LOAD_SUCCESS);

	ret = load_threadstack(thread,
		       (uint32_t *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->user_stack,
		       &customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;

	result->user_stack += slide;

	ret = load_threadentry(thread,
		       (uint32_t *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	result->entry_point += slide;

	ret = load_threadstate(thread,
		       (uint32_t *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadstate(
	thread_t	thread,
	uint32_t	*ts,
	uint32_t	total_size
)
{
	kern_return_t	ret;
	uint32_t	size;
	int		flavor;
	uint32_t	thread_size;

	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS) {
		return(LOAD_FAILURE);
	}

	/*
	 *	Set the new thread state; iterate through the state flavors in
	 *	the mach-o file.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		if (UINT32_MAX-2 < size ||
		    UINT32_MAX/sizeof(uint32_t) < size+2)
			return (LOAD_BADMACHO);
		thread_size = (size+2)*sizeof(uint32_t);
		if (thread_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= thread_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in machine_thread_set_state()
		 * based on the value of flavor.
		 */
		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
		if (ret != KERN_SUCCESS) {
			return(LOAD_FAILURE);
		}
		ts += size;	/* ts is a (uint32_t *) */
	}
	return(LOAD_SUCCESS);
}
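/*
 * Layout of the thread command payload consumed above and in the two
 * routines that follow (a sketch): the payload is a back-to-back sequence of
 *
 *	struct {
 *		uint32_t flavor;	// machine-dependent flavor constant
 *		uint32_t count;		// size of state[], in uint32_t units
 *		uint32_t state[count];	// register state for that flavor
 *	};
 *
 * entries, which is why each loop iteration consumes
 * (size + 2) * sizeof(uint32_t) bytes.
 */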
static
load_return_t
load_threadstack(
	thread_t		thread,
	uint32_t		*ts,
	uint32_t		total_size,
	mach_vm_offset_t	*user_stack,
	int			*customstack
)
{
	kern_return_t	ret;
	uint32_t	size;
	int		flavor;
	uint32_t	stack_size;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		if (UINT32_MAX-2 < size ||
		    UINT32_MAX/sizeof(uint32_t) < size+2)
			return (LOAD_BADMACHO);
		stack_size = (size+2)*sizeof(uint32_t);
		if (stack_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= stack_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_userstack() based on
		 * the value of flavor.
		 */
		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
		if (ret != KERN_SUCCESS) {
			return(LOAD_FAILURE);
		}
		ts += size;	/* ts is a (uint32_t *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
	thread_t		thread,
	uint32_t		*ts,
	uint32_t		total_size,
	mach_vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	uint32_t	size;
	int		flavor;
	uint32_t	entry_size;

	/*
	 *	Set the thread state.
	 */
	*entry_point = MACH_VM_MIN_ADDRESS;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		if (UINT32_MAX-2 < size ||
		    UINT32_MAX/sizeof(uint32_t) < size+2)
			return (LOAD_BADMACHO);
		entry_size = (size+2)*sizeof(uint32_t);
		if (entry_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= entry_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_entrypoint() based on
		 * the value of flavor.
		 */
		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
		if (ret != KERN_SUCCESS) {
			return(LOAD_FAILURE);
		}
		ts += size;	/* ts is a (uint32_t *) */
	}
	return(LOAD_SUCCESS);
}
struct macho_data {
	struct nameidata	__nid;
	union macho_vnode_header {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char	__pad[512];
	} __header;
};
static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thread,
	int			depth,
	int64_t			slide,
	load_result_t		*result
)
{
	char			*name;
	char			*p;
	struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
	struct mach_header	*header;
	off_t			file_offset = 0; /* set by get_macho_vnode() */
	off_t			macho_size = 0;	/* set by get_macho_vnode() */
	load_result_t		*myresult;
	kern_return_t		ret;
	struct macho_data	*macho_data;
	struct {
		struct mach_header	__header;
		load_result_t		__myresult;
		struct macho_data	__macho_data;
	} *dyld_data;

	if (lcp->cmdsize < sizeof(*lcp))
		return (LOAD_BADMACHO);

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null-terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	/* Allocate wad-of-data from heap to reduce excessively deep stacks */

	MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK);
	header = &dyld_data->__header;
	myresult = &dyld_data->__myresult;
	macho_data = &dyld_data->__macho_data;

	ret = get_macho_vnode(name, archbits, header,
	    &file_offset, &macho_size, macho_data, &vp);
	if (ret)
		goto novp_out;

	*myresult = load_result_null;

	/*
	 * First try to map dyld in directly.  This should work most of
	 * the time since there shouldn't normally be something already
	 * mapped to its address.
	 */

	ret = parse_machfile(vp, map, thread, header, file_offset,
	    macho_size, depth, slide, myresult);

	/*
	 * If it turned out something was in the way, then we'll take
	 * this longer path to preflight dyld's vm ranges, then
	 * map it at a free location in the address space.
	 */

	if (ret == LOAD_NOSPACE) {
		mach_vm_offset_t	dyl_start, map_addr;
		mach_vm_size_t		dyl_length;
		int64_t			slide_amount;

		*myresult = load_result_null;

		/*
		 * Preflight parsing the Mach-O file with a NULL
		 * map, which will return the ranges needed for a
		 * subsequent map attempt (with a slide) in "myresult"
		 */
		ret = parse_machfile(vp, VM_MAP_NULL, THREAD_NULL, header,
		    file_offset, macho_size, depth, 0 /* slide */, myresult);

		if (ret != LOAD_SUCCESS) {
			goto out;
		}

		dyl_start = myresult->min_vm_addr;
		dyl_length = myresult->max_vm_addr - myresult->min_vm_addr;

		dyl_length += slide;

		/* To find an appropriate load address, do a quick allocation */
		map_addr = dyl_start;
		ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}

		ret = mach_vm_deallocate(map, map_addr, dyl_length);
		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}

		if (map_addr < dyl_start)
			slide_amount = -(int64_t)(dyl_start - map_addr);
		else
			slide_amount = (int64_t)(map_addr - dyl_start);

		slide_amount += slide;

		*myresult = load_result_null;

		ret = parse_machfile(vp, map, thread, header,
		    file_offset, macho_size, depth, slide_amount, myresult);

		if (ret) {
			goto out;
		}
	}

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult->entry_point;
		result->all_image_info_addr = myresult->all_image_info_addr;
		result->all_image_info_size = myresult->all_image_info_size;
	}
out:
	vnode_put(vp);
novp_out:
	FREE(dyld_data, M_TEMP);
	return (ret);
}
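/*
 * Slide example (illustrative addresses): if dyld's preflighted range starts
 * at dyl_start = 0x8fe00000 but the probe allocation lands at map_addr =
 * 0x9fe00000, the second parse_machfile() pass runs with slide_amount =
 * +0x10000000, so every segment and the entry point land inside the free
 * range that was actually found.
 */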
static load_return_t
load_code_signature(
	struct linkedit_data_command	*lcp,
	struct vnode			*vp,
	off_t				macho_offset,
	off_t				macho_size,
	cpu_type_t			cputype,
	load_result_t			*result)
{
	int		ret;
	kern_return_t	kr;
	vm_offset_t	addr;
	int		resid;
	struct cs_blob	*blob;
	int		error;
	vm_size_t	blob_size;

	addr = 0;
	blob = NULL;

	if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
	    lcp->dataoff + lcp->datasize > macho_size) {
		ret = LOAD_BADMACHO;
		goto out;
	}

	blob = ubc_cs_blob_get(vp, cputype, -1);
	if (blob != NULL) {
		/* we already have a blob for this vnode and cputype */
		if (blob->csb_cpu_type == cputype &&
		    blob->csb_base_offset == macho_offset &&
		    blob->csb_mem_size == lcp->datasize) {
			/* it matches the blob we want here: we're done */
			ret = LOAD_SUCCESS;
		} else {
			/* the blob has changed for this vnode: fail ! */
			ret = LOAD_BADMACHO;
		}
		goto out;
	}

	blob_size = lcp->datasize;
	kr = ubc_cs_blob_allocate(&addr, &blob_size);
	if (kr != KERN_SUCCESS) {
		ret = LOAD_NOSPACE;
		goto out;
	}

	resid = 0;
	error = vn_rdwr(UIO_READ,
			vp,
			(caddr_t) addr,
			lcp->datasize,
			macho_offset + lcp->dataoff,
			UIO_SYSSPACE,
			0,
			kauth_cred_get(),
			&resid,
			current_proc());
	if (error || resid != 0) {
		ret = LOAD_IOERROR;
		goto out;
	}

	if (ubc_cs_blob_add(vp,
			    cputype,
			    macho_offset,
			    addr,
			    lcp->datasize)) {
		ret = LOAD_FAILURE;
		goto out;
	} else {
		/* ubc_cs_blob_add() has consumed "addr" */
		addr = 0;
	}

#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_allocate( vp );
#endif

	blob = ubc_cs_blob_get(vp, cputype, -1);

	ret = LOAD_SUCCESS;
out:
	if (result && ret == LOAD_SUCCESS) {
		result->csflags |= blob->csb_flags;
	}
	if (addr != 0) {
		ubc_cs_blob_deallocate(addr, blob_size);
		addr = 0;
	}

	return ret;
}
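/*
 * Note on reuse (from the logic above): the first successful load registers
 * the signature blob with UBC keyed by (vnode, cputype); later loads of the
 * same binary take the early-return path, and a blob whose base offset or
 * size no longer matches fails the load outright rather than being replaced.
 */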
#if CONFIG_CODE_DECRYPTION

static load_return_t
set_code_unprotect(
		   struct encryption_info_command *eip,
		   caddr_t addr,
		   vm_map_t map,
		   struct vnode *vp)
{
	int result, len;
	pager_crypt_info_t crypt_info;
	const char * cryptname = 0;
	char *vpath;

	size_t offset;
	struct segment_command_64 *seg64;
	struct segment_command *seg32;
	vm_map_offset_t map_offset, map_size;
	kern_return_t kr;

	if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO;

	switch(eip->cryptid) {
		case 0:
			/* not encrypted, just an empty load command */
			return LOAD_SUCCESS;
		case 1:
			cryptname="com.apple.unfree";
			break;
		case 0x10:
			/* some random cryptid that you could manually put into
			 * your binary if you want NULL */
			cryptname="com.apple.null";
			break;
		default:
			return LOAD_BADMACHO;
	}

	if (map == VM_MAP_NULL) return (LOAD_SUCCESS);
	if (NULL == text_crypter_create) return LOAD_FAILURE;

	MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if(vpath == NULL) return LOAD_FAILURE;

	len = MAXPATHLEN;
	result = vn_getpath(vp, vpath, &len);
	if(result) {
		FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
		return LOAD_FAILURE;
	}

	/* set up decrypter first */
	kr=text_crypter_create(&crypt_info, cryptname, (void*)vpath);
	FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);

	if(kr) {
		printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
		       cryptname, kr);
		return LOAD_RESOURCE;
	}

	/* this is terrible, but we have to rescan the load commands to find the
	 * virtual address of this encrypted stuff. This code is gonna look like
	 * the dyld source one day... */
	struct mach_header *header = (struct mach_header *)addr;
	size_t mach_header_sz = sizeof(struct mach_header);
	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}
	offset = mach_header_sz;
	uint32_t ncmds = header->ncmds;
	while (ncmds--) {
		/*
		 *	Get a pointer to the command.
		 */
		struct load_command *lcp = (struct load_command *)(addr + offset);
		offset += lcp->cmdsize;

		switch(lcp->cmd) {
			case LC_SEGMENT_64:
				seg64 = (struct segment_command_64 *)lcp;
				if ((seg64->fileoff <= eip->cryptoff) &&
				    (seg64->fileoff+seg64->filesize >=
				     eip->cryptoff+eip->cryptsize)) {
					map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff;
					map_size = eip->cryptsize;
					goto remap_now;
				}
				break;
			case LC_SEGMENT:
				seg32 = (struct segment_command *)lcp;
				if ((seg32->fileoff <= eip->cryptoff) &&
				    (seg32->fileoff+seg32->filesize >=
				     eip->cryptoff+eip->cryptsize)) {
					map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff;
					map_size = eip->cryptsize;
					goto remap_now;
				}
				break;
		}
	}

	/* if we get here, did not find anything */
	return LOAD_BADMACHO;

remap_now:
	/* now remap using the decrypter */
	kr = vm_map_apple_protected(map, map_offset, map_offset+map_size, &crypt_info);
	if(kr) {
		printf("set_code_unprotect(): mapping failed with %x\n", kr);
		crypt_info.crypt_end(crypt_info.crypt_ops);
		return LOAD_PROTECT;
	}

	return LOAD_SUCCESS;
}

#endif /* CONFIG_CODE_DECRYPTION */
/*
 * This routine exists to support load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct macho_data	*data,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	vfs_context_t		ctx = vfs_context_current();
	proc_t			p = vfs_context_proc(ctx);
	kauth_cred_t		kerncred;
	struct nameidata	*ndp = &data->__nid;
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error;
	int			resid;
	union macho_vnode_header *header = &data->__header;
	off_t			fsize = (off_t)0;

	/*
	 * Capture the kernel credential for use in the actual read of the
	 * file, since the user doing the execution may have execute rights
	 * but not read rights, but to exec something, we have to either map
	 * or read it into the new process address space, which requires
	 * read rights.  This is to deal with lack of common credential
	 * serialization code which would treat NOCRED as "serialize 'root'".
	 */
	kerncred = vfs_context_ucred(vfs_context_kernel());

	/* init the namei data to point the file user's program name */
	NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);

	if ((error = namei(ndp)) != 0) {
		if (error == ENOENT) {
			error = LOAD_ENOENT;
		} else {
			error = LOAD_FAILURE;
		}
		return(error);
	}
	nameidone(ndp);
	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof (*header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	if (header->mach_header.magic == MH_MAGIC ||
	    header->mach_header.magic == MH_MAGIC_64) {
		is_fat = FALSE;
	} else if (header->fat_header.magic == FAT_MAGIC ||
	    header->fat_header.magic == FAT_CIGAM) {
		is_fat = TRUE;
	} else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch_with_bits(vp, archbits,
		    (vm_offset_t)(&header->fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
		    sizeof (header->mach_header), fat_arch.offset,
		    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header->mach_header.magic != MH_MAGIC &&
		    header->mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
			error = LOAD_BADARCH;
			goto bad2;
		}

		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header->mach_header;
	*vpp = vp;

	ubc_setsize(vp, fsize);
	return(error);

bad2:
	(void) VNOP_CLOSE(vp, FREAD, ctx);
bad1:
	vnode_put(vp);
	return(error);
}