/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>	/* vm_allocate() */
#include <mach/mach_vm.h>	/* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/shared_memory_server.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>
/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t	pmap_create(vm_map_size_t size, boolean_t is_64bit);
extern void	pmap_switch(pmap_t);
extern void	pmap_map_sharedpage(task_t task, pmap_t pmap);
/*
 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern kern_return_t	thread_setstatus(thread_t thread, int flavor,
				thread_state_t tstate,
				mach_msg_type_number_t count);

extern kern_return_t	thread_state_initialize(thread_t thread);
/* XXX should have prototypes in a shared header file */
extern int	get_map_nentries(vm_map_t);
extern kern_return_t	thread_userstack(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *, int *);
extern kern_return_t	thread_entrypoint(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *);
/* An empty load_result_t */
static load_result_t load_result_null = {
	/* ... */
};
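/*
 * Commentary (not part of the original source): load_result_t accumulates
 * what the loader learns about the image.  The fields referenced in this
 * file are mach_header (user address of the mapped header), entry_point,
 * user_stack, thread_count, customstack, unixproc and dynlinker.
 */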
/*
 * Prototypes of static functions.
 */
static load_return_t parse_machfile(struct vnode *vp, vm_map_t map,
		thread_t thr_act, struct mach_header *header,
		off_t file_offset, off_t macho_size,
		boolean_t shared_regions, boolean_t clean_regions,
		int depth, load_result_t *result);

static load_return_t load_segment(struct segment_command *scp, void *pager,
		off_t pager_offset, off_t macho_size, off_t end_of_file,
		vm_map_t map, load_result_t *result);

static load_return_t load_segment_64(struct segment_command_64 *scp64,
		void *pager, off_t pager_offset, off_t macho_size,
		off_t end_of_file, vm_map_t map, load_result_t *result);

static load_return_t load_thread(struct thread_command *tcp,
		thread_t thread, load_result_t *result);

static load_return_t load_unixthread(struct thread_command *tcp,
		thread_t thread, load_result_t *result);

static load_return_t load_threadstate(thread_t thread, unsigned long *ts,
		unsigned long total_size);

static load_return_t load_threadstack(thread_t thread, unsigned long *ts,
		unsigned long total_size, mach_vm_offset_t *user_stack,
		int *customstack);

static load_return_t load_threadentry(thread_t thread, unsigned long *ts,
		unsigned long total_size, mach_vm_offset_t *entry_point);

static load_return_t load_dylinker(struct dylinker_command *lcp,
		integer_t archbits, vm_map_t map, thread_t thr_act, int depth,
		load_result_t *result, boolean_t clean_regions,
		boolean_t is_64bit);

static load_return_t get_macho_vnode(char *path, integer_t archbits,
		struct mach_header *mach_header, off_t *file_offset,
		off_t *macho_size, struct vnode **vpp);
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thr_act,
	vm_map_t		new_map,
	boolean_t		clean_regions,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;

	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;

	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef NO_NESTED_PMAP
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else	/* NO_NESTED_PMAP */
		pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
#endif	/* NO_NESTED_PMAP */
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;

	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
		vm_map_disable_NX(map);

	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			((imgp->ip_flags & IMGPF_IS_64BIT) == 0), /* shared regions? */
			clean_regions, 0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */
	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	    vm_map_has_4GB_pagezero(map))
		vm_map_set_4GB_pagezero(map);

	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_clear_4GB_pagezero(old_map);
#ifndef NO_NESTED_PMAP
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif	/* !NO_NESTED_PMAP */
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
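/*
 * Commentary (not part of the original source): load_machfile operates in
 * two modes.  When new_map == VM_MAP_NULL it creates a fresh map/pmap pair
 * and, on success, halts the task's other threads and swaps the new map in;
 * when the caller supplies a map, the image is simply loaded into that map
 * and no task-level state is touched.
 */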
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thr_act,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	boolean_t		shared_regions,
	boolean_t		clean_regions,
	int			depth,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void			*kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	struct proc		*p = current_proc();		/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);

	task = (task_t)get_threadtask(thr_act);

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype, header->cpusubtype))
		return(LOAD_BADARCH);

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundry.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}
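			/*
			 * Illustrative note (not a comment in the original
			 * source): the oldoffset > offset test catches
			 * integer wraparound.  For example, a crafted
			 * command with cmdsize = 0xFFFFFFF0 added to a small
			 * offset wraps past zero, leaving offset smaller
			 * than oldoffset, so the image is rejected as
			 * LOAD_BADMACHO instead of being read out of bounds.
			 */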
			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT_64:
				if (pass != 1)
					break;
				ret = load_segment_64(
					(struct segment_command_64 *)lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
					thr_act,
					result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
					(struct thread_command *) lcp,
					thr_act,
					result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			default:
				/* Other commands are ignored by the kernel */
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) {
	    if (shared_regions) {
		vm_offset_t vmaddr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings	map_info;
		shared_region_mapping_t	next;

RedoLookup:
		vm_get_shared_region(task, &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.fs_base),
			&(map_info.system),
			&(map_info.flags), &next);

		if((map_info.flags & SHARED_REGION_FULL) ||
			(map_info.flags & SHARED_REGION_STALE)) {
			shared_region_mapping_t system_region;
			system_region = lookup_default_shared_region(
				map_info.fs_base, map_info.system);
			if((map_info.self != (vm_offset_t)system_region) &&
				(map_info.flags & SHARED_REGION_SYSTEM)) {
				if(system_region == NULL) {
					shared_file_boot_time_init(
						map_info.fs_base, map_info.system);
				} else {
					vm_set_shared_region(task, system_region);
				}
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
				goto RedoLookup;
			} else if (map_info.flags & SHARED_REGION_SYSTEM) {
				shared_region_mapping_dealloc(system_region);
				shared_file_boot_time_init(
					map_info.fs_base, map_info.system);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			} else {
				shared_region_mapping_dealloc(system_region);
			}
		}

		p->p_flag |= P_NOSHLIB; /* no shlibs in use */
		vmaddr = map_info.client_base;
		if (clean_regions) {
			vm_map(map, &vmaddr, map_info.text_size,
				0, SHARED_LIB_ALIAS|VM_FLAGS_FIXED,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		} else {
			vm_map(map, &vmaddr, map_info.text_size, 0,
				(VM_MEMORY_SHARED_PMAP << 24)
				  | SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
		vmaddr = map_info.client_base + map_info.text_size;
		vm_map(map, &vmaddr, map_info.data_size,
			0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
			map_info.data_region, 0, TRUE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

		while (next) {
			/* this should be fleshed out for the general case */
			/* but this is not necessary for now.  Indeed we   */
			/* are handling the com page inside of the         */
			/* shared_region mapping create calls for now for  */
			/* simplicities sake.  If more general support is  */
			/* needed the code to manipulate the shared range  */
			/* chain can be pulled out and moved to the callers*/
			shared_region_mapping_info(next,
				&(map_info.text_region),
				&(map_info.text_size),
				&(map_info.data_region),
				&(map_info.data_size),
				&(map_info.region_mappings),
				&(map_info.client_base),
				&(map_info.alternate_base),
				&(map_info.alternate_next),
				&(map_info.fs_base),
				&(map_info.system),
				&(map_info.flags), &next);

			vmaddr = map_info.client_base;
			vm_map(map, &vmaddr, map_info.text_size,
				0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
	    }
	    if (dlp != 0)
		ret = load_dylinker(dlp, dlarchbits, map, thr_act, depth, result, clean_regions, abi64);

	    if (depth == 1) {
		if (result->thread_count == 0)
			ret = LOAD_FAILURE;
		else if (abi64) {
			/* Map in 64-bit commpage */
			/* LP64todo - make this clean */
			pmap_map_sharedpage(current_task(), get_map_pmap(map));
			vm_map_commpage64(map);
		}
#ifdef __i386__
		else {
			/*
			 * On Intel, the comm page doesn't get mapped
			 * automatically because it goes beyond the current end
			 * of the VM map in the current 3GB/1GB address space
			 * model.
			 * XXX This will probably become unnecessary when we
			 * switch to the 4GB/4GB address space model.
			 */
			vm_map_commpage32(map);
		}
#endif /* __i386__ */
	    }
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	if (ret == LOAD_SUCCESS)
		(void)ubc_map(vp, PROT_EXEC);

	return(ret);
}
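/*
 * Commentary (not part of the original source): parse_machfile makes two
 * passes over the load commands.  Segments are mapped on pass 1 and thread
 * commands are acted on only on pass 2, so by the time thread state and the
 * entry point are set, every segment they might reference is already mapped.
 */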
#ifndef	SG_PROTECTED_VERSION_1
#define SG_PROTECTED_VERSION_1	0x8
#endif /* SG_PROTECTED_VERSION_1 */

#ifdef __i386__

#define APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)

static load_return_t
unprotect_segment_64(
	uint64_t	file_off,
	uint64_t	file_size,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t	kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t	delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		kr = vm_map_apple_protected(map,
					    map_addr,
					    map_addr + map_size);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else	/* __i386__ */
#define unprotect_segment_64(file_off, file_size, map, map_addr, map_size) \
	LOAD_SUCCESS
#endif	/* __i386__ */
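/*
 * Worked example (illustrative, not a comment in the original source): for
 * a protected segment at file_off = 0x1000 with file_size = 0x5000, the
 * first APPLE_UNPROTECTED_HEADER_SIZE (0x3000) bytes of the slice are
 * exempt, so delta = 0x3000 - 0x1000 = 0x2000 and the transform is applied
 * starting at map_addr + 0x2000 for the remaining map_size - 0x2000 bytes.
 * On non-i386 configurations the macro above makes the whole thing a no-op
 * that evaluates to LOAD_SUCCESS.
 */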
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	__unused off_t		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

#if 0	/* XXX (4596982) this interferes with Rosetta */
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, (vm_map_offset_t) seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}
#endif

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
			&map_addr, map_size, (vm_offset_t)0,
			VM_FLAGS_FIXED,	pager, map_offset, TRUE,
			initprot, maxprot,
			VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
				    delta_size)) {
				(void) vm_deallocate(
					kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
			     NULL, 0, FALSE,
			     scp->initprot, scp->maxprot,
			     VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64((uint64_t) scp->fileoff,
					   (uint64_t) scp->filesize,
					   map,
					   (vm_map_offset_t) map_addr,
					   (vm_map_size_t) map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}
static
load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	__unused off_t			end_of_file,
	vm_map_t			map,
	load_result_t			*result
)
{
	kern_return_t		ret;
	mach_vm_offset_t	map_addr, map_offset;
	mach_vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page_64(scp64->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page_64(scp64->filesize);	/* limited to 32 bits */
	map_addr = round_page_64(scp64->vmaddr);

	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}

	map_offset = pager_offset + scp64->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp64->initprot) & VM_PROT_ALL;
		maxprot = (scp64->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = mach_vm_map(map,
			&map_addr, map_size, (mach_vm_offset_t)0,
			VM_FLAGS_FIXED,	pager, map_offset, TRUE,
			initprot, maxprot,
			VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp64->filesize;
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp64->filesize,
				    delta_size)) {
				(void) vm_deallocate(
					kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t	tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp64->initprot, scp64->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
		result->mach_header = map_addr;

	if (scp64->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64(scp64->fileoff,
					   scp64->filesize,
					   map,
					   map_addr,
					   map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	int		customstack = 0;

	task = get_threadtask(thread);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->user_stack,
			&customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return (lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
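/*
 * Commentary (not part of the original source): the first LC_THREAD applies
 * its state to the activation passed in (thread_count == 0) and records the
 * stack and entry point; each additional LC_THREAD creates a new thread in
 * the task and resumes it, which is why stack and entry point are only
 * recorded for the first one.
 */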
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command),
		&result->user_stack,
		&customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;

	ret = load_threadentry(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command),
		&result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	thread_size;

	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS)
		return(LOAD_FAILURE);

	/*
	 *	Set the new thread state; iterate through the state flavors in
	 *	the mach-o file.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		thread_size = (size+2)*sizeof(unsigned long);
		if (thread_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= thread_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in machine_thread_set_state()
		 * based on the value of flavor.
		 */
		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
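/*
 * Illustrative layout (inferred from the parsing above, not a comment in
 * the original source): the payload of an LC_THREAD/LC_UNIXTHREAD command
 * is a sequence of records of the form
 *
 *	unsigned long	flavor;		-- thread state flavor
 *	unsigned long	count;		-- size of the state, in longs
 *	unsigned long	state[count];	-- flavor-specific register state
 *
 * which is why each iteration consumes (size + 2) longs.
 */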
static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	user_addr_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	stack_size;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		stack_size = (size+2)*sizeof(unsigned long);
		if (stack_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= stack_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_userstack() based on
		 * the value of flavor.
		 */
		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	entry_size;

	/*
	 *	Set the thread state.
	 */
	*entry_point = MACH_VM_MIN_ADDRESS;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		entry_size = (size+2)*sizeof(unsigned long);
		if (entry_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= entry_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_entrypoint() based on
		 * the value of flavor.
		 */
		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions,
	boolean_t		is_64bit
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	off_t			file_offset;
	off_t			macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	mach_vm_offset_t	dyl_start, map_addr;
	mach_vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	/*
	 *	Use a temporary map to do the work.
	 */
	copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
					     is_64bit),
				 get_map_min(map), get_map_max(map), TRUE);
	if (VM_MAP_NULL == copy_map) {
		ret = LOAD_RESOURCE;
		goto out;
	}

	myresult = load_result_null;

	ret = parse_machfile(vp, copy_map, thr_act, &header,
			     file_offset, macho_size,
			     FALSE, clean_regions, depth, &myresult);

	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = mach_get_vm_start(copy_map);
		dyl_length = mach_get_vm_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_FIXED);
		if (ret != KERN_SUCCESS) {
			ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}

		ret = vm_map_copyin(copy_map,
				    (vm_map_address_t)dyl_start,
				    (vm_map_size_t)dyl_length,
				    TRUE, &tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map,
					    (vm_map_address_t)map_addr,
					    tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		(void)ubc_map(vp, PROT_EXEC);
	}
out:
	vm_map_deallocate(copy_map);

	vnode_put(vp);
	return (ret);
}
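/*
 * Commentary (not part of the original source): because the dynamic linker
 * is first parsed into a throwaway copy_map and only then copied into the
 * target map, it can be slid: if the preferred range (dyl_start) is taken,
 * mach_vm_allocate() falls back to VM_FLAGS_ANYWHERE and the entry point is
 * adjusted by (map_addr - dyl_start).
 */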
/*
 * This routine exists to support the load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vfs_context	context;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();  /* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = LOAD_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;
	off_t			fsize = (off_t)0;
	struct ucred		*cred = kauth_cred_get();
	int			err2;

	context.vc_proc = p;
	context.vc_ucred = cred;

	ndp = &nid;

	/* init the namei data to point the file user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(path), &context);

	if ((error = namei(ndp)) != 0) {
		if (error == ENOENT)
			error = LOAD_ENOENT;
		else
			error = LOAD_FAILURE;
		return(error);
	}
	nameidone(ndp);
	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, &context)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VNOP_OPEN(vp, FREAD, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	if (header.mach_header.magic == MH_MAGIC ||
	    header.mach_header.magic == MH_MAGIC_64)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}
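	/*
	 * Background (illustrative, not a comment in the original source):
	 * a fat (Universal) file begins with a big-endian struct fat_header
	 * { magic; nfat_arch; } followed by nfat_arch struct fat_arch
	 * entries, each giving cputype, cpusubtype, offset, size and align
	 * for one embedded Mach-O slice.  FAT_CIGAM is FAT_MAGIC as seen
	 * from the opposite byte order; the lookup helper below is
	 * responsible for interpreting the big-endian fields.
	 */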
	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC &&
		    header.mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits)
			return(LOAD_BADARCH);

		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header.mach_header;
	*vpp = vp;

	ubc_setsize(vp, fsize);

	return (error);

bad2:
	err2 = VNOP_CLOSE(vp, FREAD, &context);
	vnode_put(vp);
	return (error);

bad1:
	vnode_put(vp);
	return(error);
}