/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>	/* vm_allocate() */
#include <mach/mach_vm.h>	/* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/shared_memory_server.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>
/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t	pmap_create(vm_map_size_t size, boolean_t is_64bit);
extern void	pmap_switch(pmap_t);
extern void	pmap_map_sharedpage(task_t task, pmap_t pmap);
/*
 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern kern_return_t	thread_setstatus(thread_t thread, int flavor,
				thread_state_t tstate,
				mach_msg_type_number_t count);

extern kern_return_t	thread_state_initialize(thread_t thread);
/* XXX should have prototypes in a shared header file */
extern int	get_map_nentries(vm_map_t);
extern kern_return_t	thread_userstack(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *, int *);
extern kern_return_t	thread_entrypoint(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *);
/* An empty load_result_t */
static load_result_t	load_result_null = { 0 };
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thr_act,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	boolean_t		shared_regions,
	boolean_t		clean_regions,
	int			depth,
	load_result_t		*result
);

static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	off_t			end_of_file,
	vm_map_t		map,
	load_result_t		*result
);

static load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	off_t				end_of_file,
	vm_map_t			map,
	load_result_t			*result
);

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
);

static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
);

static load_return_t
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*user_stack,
	int			*customstack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions,
	boolean_t		is_64bit
);

static load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
);
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thr_act,
	vm_map_t		new_map,
	boolean_t		clean_regions,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;

	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;

	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef NO_NESTED_PMAP
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else	/* NO_NESTED_PMAP */
		pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
#endif	/* NO_NESTED_PMAP */
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;
	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
		vm_map_disable_NX(map);

	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			      ((imgp->ip_flags & IMGPF_IS_64BIT) == 0), /* shared regions? */
			      clean_regions, 0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}
	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */
	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	    vm_map_has_4GB_pagezero(map))
		vm_map_set_4GB_pagezero(map);

	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_clear_4GB_pagezero(old_map);
#ifndef NO_NESTED_PMAP
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif	/* !NO_NESTED_PMAP */
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
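/*
 * Illustrative sketch only (not part of the original loader): the Mach-O
 * image that parse_machfile() walks below starts with a mach_header
 * (magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags),
 * immediately followed by "ncmds" variable-length load commands that
 * together occupy "sizeofcmds" bytes.  Each command begins with the
 * generic struct load_command { uint32_t cmd; uint32_t cmdsize; }.
 * The hypothetical helper below shows the basic walk over a kernel
 * buffer holding the header plus its commands.
 */
#if 0	/* example sketch, never compiled */
static void
example_walk_load_commands(caddr_t addr, size_t mach_header_sz,
			   uint32_t ncmds, uint32_t sizeofcmds)
{
	size_t			offset = mach_header_sz;
	struct load_command	*lcp;

	while (ncmds--) {
		lcp = (struct load_command *)(addr + offset);
		if (lcp->cmdsize < sizeof(*lcp) ||
		    offset + lcp->cmdsize > mach_header_sz + sizeofcmds)
			break;		/* malformed command list */
		/* dispatch on lcp->cmd (LC_SEGMENT, LC_THREAD, ...) here */
		offset += lcp->cmdsize;
	}
}
#endif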
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thr_act,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	boolean_t		shared_regions,
	boolean_t		clean_regions,
	int			depth,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void			*kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	struct proc		*p = current_proc();		/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);
	task = (task_t)get_threadtask(thr_act);

	depth++;
	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype, header->cpusubtype))
		return(LOAD_BADARCH);

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}
	/*
	 *	Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */
	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}
			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT_64:
				if (pass != 1)
					break;
				ret = load_segment_64(
					(struct segment_command_64 *)lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
						  thr_act,
						  result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
						(struct thread_command *) lcp,
						thr_act,
						result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}

	if (ret == LOAD_SUCCESS) {
	    if (shared_regions) {
		vm_offset_t vmaddr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings	map_info;
		shared_region_mapping_t next;

		vm_get_shared_region(task, &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
					   &(map_info.text_region),
					   &(map_info.text_size),
					   &(map_info.data_region),
					   &(map_info.data_size),
					   &(map_info.region_mappings),
					   &(map_info.client_base),
					   &(map_info.alternate_base),
					   &(map_info.alternate_next),
					   &(map_info.fs_base),
					   &(map_info.system),
					   &(map_info.flags), &next);
		if((map_info.flags & SHARED_REGION_FULL) ||
		   (map_info.flags & SHARED_REGION_STALE)) {
			shared_region_mapping_t system_region;
			system_region = lookup_default_shared_region(
				map_info.fs_base, map_info.system);
			if((map_info.self != (vm_offset_t)system_region) &&
			   (map_info.flags & SHARED_REGION_SYSTEM)) {
				if(system_region == NULL) {
					shared_file_boot_time_init(
						map_info.fs_base, map_info.system);
				} else {
					vm_set_shared_region(task, system_region);
				}
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			} else if (map_info.flags & SHARED_REGION_SYSTEM) {
				shared_region_mapping_dealloc(system_region);
				shared_file_boot_time_init(
					map_info.fs_base, map_info.system);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			} else {
				shared_region_mapping_dealloc(system_region);
			}
		}
		p->p_flag |= P_NOSHLIB;	/* no shlibs in use */
		vmaddr = map_info.client_base;
		if(clean_regions) {
			vm_map(map, &vmaddr, map_info.text_size,
				0, SHARED_LIB_ALIAS|VM_FLAGS_FIXED,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		} else {
			vm_map(map, &vmaddr, map_info.text_size, 0,
				(VM_MEMORY_SHARED_PMAP << 24)
					| SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
		vmaddr = map_info.client_base + map_info.text_size;
		vm_map(map, &vmaddr, map_info.data_size,
			0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
			map_info.data_region, 0, TRUE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		while (next) {
			/* this should be fleshed out for the general case */
			/* but this is not necessary for now.  Indeed we    */
			/* are handling the com page inside of the          */
			/* shared_region mapping create calls for now for   */
			/* simplicity's sake.  If more general support is   */
			/* needed the code to manipulate the shared range   */
			/* chain can be pulled out and moved to the callers */
			shared_region_mapping_info(next,
				&(map_info.text_region),
				&(map_info.text_size),
				&(map_info.data_region),
				&(map_info.data_size),
				&(map_info.region_mappings),
				&(map_info.client_base),
				&(map_info.alternate_base),
				&(map_info.alternate_next),
				&(map_info.fs_base),
				&(map_info.system),
				&(map_info.flags), &next);

			vmaddr = map_info.client_base;
			vm_map(map, &vmaddr, map_info.text_size,
				0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
	    }
	    if (dlp != 0)
		ret = load_dylinker(dlp, dlarchbits, map, thr_act, depth, result, clean_regions, abi64);

	    if (depth == 1) {
		if (result->thread_count == 0)
			ret = LOAD_FAILURE;
#ifdef __ppc__
		else if ( abi64 ) {
			/* Map in 64-bit commpage */
			/* LP64todo - make this clean */
			pmap_map_sharedpage(current_task(), get_map_pmap(map));
			vm_map_commpage64(map);
		}
#else	/* __i386__ */
		else {
			/*
			 * On Intel, the comm page doesn't get mapped
			 * automatically because it goes beyond the current end
			 * of the VM map in the current 3GB/1GB address space
			 * model.
			 *
			 * XXX This will probably become unnecessary when we
			 * switch to the 4GB/4GB address space model.
			 */
			vm_map_commpage32(map);
		}
#endif	/* __i386__ */
	    }
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	if (ret == LOAD_SUCCESS)
		(void)ubc_map(vp, PROT_EXEC);

	return(ret);
}
#ifndef SG_PROTECTED_VERSION_1
#define SG_PROTECTED_VERSION_1	0x8
#endif /* SG_PROTECTED_VERSION_1 */
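/*
 * SG_PROTECTED_VERSION_1 appears to mark segments whose on-disk contents are
 * stored in a "protected" (obfuscated) form.  The unprotect_segment_64()
 * routine below leaves the first APPLE_UNPROTECTED_HEADER_SIZE bytes of the
 * slice untouched and asks the VM layer, via vm_map_apple_protected(), to
 * transform the remainder of the mapping when it is faulted in.
 */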
#ifdef __i386__

#define APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)

static load_return_t
unprotect_segment_64(
	uint64_t	file_off,
	uint64_t	file_size,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t	kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t	delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		kr = vm_map_apple_protected(map,
					    map_addr,
					    map_addr + map_size);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else	/* __i386__ */
#define unprotect_segment_64(file_off, file_size, map, map_addr, map_size) \
	LOAD_SUCCESS
#endif	/* __i386__ */
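/*
 * Worked example (illustrative, assuming 4KB pages): for a protected segment
 * whose slice covers file offsets [0, 0x5000), the first three pages
 * (APPLE_UNPROTECTED_HEADER_SIZE = 0x3000 bytes) are left as-is, so
 * delta = 0x3000 - file_off = 0x3000 and only
 * [map_addr + 0x3000, map_addr + 0x5000) is handed to
 * vm_map_apple_protected().  A slice lying entirely within the first three
 * pages is returned untouched.
 */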
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	__unused off_t		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);
#if 0	/* XXX (4596982) this interferes with Rosetta */
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, (vm_map_offset_t) seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}
#endif
	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
			     &map_addr, map_size, (vm_offset_t)0,
			     VM_FLAGS_FIXED, pager, map_offset, TRUE,
			     initprot, maxprot,
			     VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
				    delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif	/* FIXME */
	}
	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
			     NULL, 0, FALSE,
			     scp->initprot, scp->maxprot,
			     VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}
	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64((uint64_t) scp->fileoff,
					   (uint64_t) scp->filesize,
					   map,
					   (vm_map_offset_t) map_addr,
					   (vm_map_size_t) map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}
static
load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	__unused off_t			end_of_file,
	vm_map_t			map,
	load_result_t			*result
)
{
	kern_return_t		ret;
	mach_vm_offset_t	map_addr, map_offset;
	mach_vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page_64(scp64->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page_64(scp64->filesize);	/* limited to 32 bits */
	map_addr = round_page_64(scp64->vmaddr);
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}
	map_offset = pager_offset + scp64->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp64->initprot) & VM_PROT_ALL;
		maxprot = (scp64->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = mach_vm_map(map,
				  &map_addr, map_size, (mach_vm_offset_t)0,
				  VM_FLAGS_FIXED, pager, map_offset, TRUE,
				  initprot, maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp64->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp64->filesize,
				    delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif	/* FIXME */
	}
	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp64->initprot, scp64->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}
	if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
		result->mach_header = map_addr;

	if (scp64->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64(scp64->fileoff,
					   scp64->filesize,
					   map,
					   map_addr,
					   map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	int		customstack = 0;

	task = get_threadtask(thread);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}
	lret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
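/*
 * load_unixthread() below handles LC_UNIXTHREAD: it is only accepted for the
 * first (main) thread, records the user stack and entry point in the load
 * result, and marks the image as a BSD process via result->unixproc.
 * load_thread() above, by contrast, will create and resume an additional
 * kernel thread when one has already been set up.
 */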
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->user_stack,
			&customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;

	ret = load_threadentry(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
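/*
 * In outline, the three helpers below all parse the same blob that follows a
 * struct thread_command in the file: a sequence of
 *
 *	{ flavor, count, state[count] }
 *
 * records of 32-bit words, repeated until the command's remaining size is
 * exhausted.  That is why each helper consumes (size + 2) * sizeof(unsigned
 * long) bytes per iteration: the "size" state words plus the flavor and
 * count words themselves.
 */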
static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	thread_size;

	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS)
		return(LOAD_FAILURE);

	/*
	 *	Set the new thread state; iterate through the state flavors in
	 *	the mach-o file.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		thread_size = (size+2)*sizeof(unsigned long);
		if (thread_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= thread_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in machine_thread_set_state()
		 * based on the value of flavor.
		 */
		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	user_addr_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	stack_size;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		stack_size = (size+2)*sizeof(unsigned long);
		if (stack_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= stack_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_userstack() based on
		 * the value of flavor.
		 */
		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	entry_size;

	/*
	 *	Set the thread state.
	 */
	*entry_point = MACH_VM_MIN_ADDRESS;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		entry_size = (size+2)*sizeof(unsigned long);
		if (entry_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= entry_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_entrypoint() based on
		 * the value of flavor.
		 */
		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
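/*
 * Rough picture of what follows: the dynamic linker is loaded at depth 2
 * into a scratch map first, then the resulting regions are copied into the
 * target task's map, and the recorded entry point is slid if dyld's
 * preferred address range was not available in the target map.
 */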
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions,
	boolean_t		is_64bit
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	off_t			file_offset;
	off_t			macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	mach_vm_offset_t	dyl_start, map_addr;
	mach_vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);
	ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	/*
	 *	Use a temporary map to do the work.
	 */
	copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
					     is_64bit),
				 get_map_min(map), get_map_max(map), TRUE);
	if (VM_MAP_NULL == copy_map) {
		ret = LOAD_RESOURCE;
		goto out;
	}

	myresult = load_result_null;

	ret = parse_machfile(vp, copy_map, thr_act, &header,
			     file_offset, macho_size,
			     FALSE, clean_regions, depth, &myresult);

	if (ret)
		goto out;
	if (get_map_nentries(copy_map) > 0) {

		dyl_start = mach_get_vm_start(copy_map);
		dyl_length = mach_get_vm_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_FIXED);
		if (ret != KERN_SUCCESS) {
			ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}

		ret = vm_map_copyin(copy_map,
				    (vm_map_address_t)dyl_start,
				    (vm_map_size_t)dyl_length,
				    TRUE, &tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map,
					    (vm_map_address_t)map_addr,
					    tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else {
		ret = LOAD_FAILURE;
	}
	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		(void)ubc_map(vp, PROT_EXEC);
	}

out:
	vm_map_deallocate(copy_map);

	vnode_put(vp);
	return (ret);
}
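/*
 * For reference when reading get_macho_vnode() below: a "fat" (universal)
 * file begins with a big-endian struct fat_header { uint32_t magic;
 * uint32_t nfat_arch; } followed by nfat_arch struct fat_arch entries, each
 * carrying cputype, cpusubtype, offset, size and align for one embedded
 * Mach-O slice.  fatfile_getarch_with_bits() picks the slice matching the
 * requested architecture and returns its fat_arch, whose offset and size
 * are then used to read and bound the embedded Mach-O.
 */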
/*
 * This routine exists to support the load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vfs_context	context;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = LOAD_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;
	off_t			fsize = (off_t)0;
	struct ucred		*cred = kauth_cred_get();
	int			err2;

	context.vc_proc = p;
	context.vc_ucred = cred;
	ndp = &nid;

	/* init the namei data to point the file user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(path), &context);

	if ((error = namei(ndp)) != 0) {
		if (error == ENOENT)
			error = LOAD_ENOENT;
		else
			error = LOAD_FAILURE;
		return(error);
	}
	nameidone(ndp);
	vp = ndp->ni_vp;
	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, &context)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VNOP_OPEN(vp, FREAD, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}
	if (header.mach_header.magic == MH_MAGIC ||
	    header.mach_header.magic == MH_MAGIC_64)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC &&
		    header.mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits)
			return(LOAD_BADARCH);

		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header.mach_header;
	*vpp = vp;
	ubc_setsize(vp, fsize);

	return(error);

bad2:
	err2 = VNOP_CLOSE(vp, FREAD, &context);
	vnode_put(vp);
	return (error);

bad1:
	vnode_put(vp);
	return(error);
}