/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File:    kern/mach_loader.c
 * Author:  Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *  Started.
 */
#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>    /* vm_allocate() */
#include <mach/mach_vm.h>   /* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/shared_memory_server.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>
/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t   pmap_create(vm_map_size_t size, boolean_t is_64bit);
extern void     pmap_switch(pmap_t);
extern void     pmap_map_sharedpage(task_t task, pmap_t pmap);
/*
 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern kern_return_t    thread_setstatus(thread_t thread, int flavor,
                thread_state_t tstate,
                mach_msg_type_number_t count);

extern kern_return_t    thread_state_initialize(thread_t thread);
/* XXX should have prototypes in a shared header file */
extern int  get_map_nentries(vm_map_t);
extern kern_return_t    thread_userstack(thread_t, int, thread_state_t,
                unsigned int, mach_vm_offset_t *, int *);
extern kern_return_t    thread_entrypoint(thread_t, int, thread_state_t,
                unsigned int, mach_vm_offset_t *);
/* An empty load_result_t */
static load_result_t load_result_null = {
    MACH_VM_MIN_ADDRESS,
    MACH_VM_MIN_ADDRESS,
    MACH_VM_MIN_ADDRESS,
    0,
    0,
    0,
    0
};
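/*
 * A load_result_t accumulates everything the rest of the exec path needs to
 * know about a mapped image: the address at which the mach_header was
 * mapped, the initial entry point and user stack, how many thread commands
 * were processed, and the unixproc / dynlinker / customstack flags.
 */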
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(struct vnode *vp, vm_map_t map, thread_t thr_act,
    struct mach_header *header, off_t file_offset, off_t macho_size,
    boolean_t shared_regions, boolean_t clean_regions, int depth,
    load_result_t *result);

static load_return_t
load_segment(struct segment_command *scp, void *pager, off_t pager_offset,
    off_t macho_size, off_t end_of_file, vm_map_t map,
    load_result_t *result);

static load_return_t
load_segment_64(struct segment_command_64 *scp64, void *pager,
    off_t pager_offset, off_t macho_size, off_t end_of_file,
    vm_map_t map, load_result_t *result);

static load_return_t
load_thread(struct thread_command *tcp, thread_t thread,
    load_result_t *result);

static load_return_t
load_unixthread(struct thread_command *tcp, thread_t thread,
    load_result_t *result);

static load_return_t
load_threadstate(thread_t thread, unsigned long *ts,
    unsigned long total_size);

static load_return_t
load_threadstack(thread_t thread, unsigned long *ts,
    unsigned long total_size, mach_vm_offset_t *user_stack,
    int *customstack);

static load_return_t
load_threadentry(thread_t thread, unsigned long *ts,
    unsigned long total_size, mach_vm_offset_t *entry_point);

static load_return_t
load_dylinker(struct dylinker_command *lcp, integer_t archbits,
    vm_map_t map, thread_t thr_act, int depth, load_result_t *result,
    boolean_t clean_regions, boolean_t is_64bit);

static load_return_t
get_macho_vnode(char *path, integer_t archbits,
    struct mach_header *mach_header, off_t *file_offset,
    off_t *macho_size, struct vnode **vpp);
load_return_t
load_machfile(
    struct image_params *imgp,
    struct mach_header  *header,
    thread_t            thr_act,
    vm_map_t            new_map,
    boolean_t           clean_regions,
    load_result_t       *result
)
{
    struct vnode        *vp = imgp->ip_vp;
    off_t               file_offset = imgp->ip_arch_offset;
    off_t               macho_size = imgp->ip_arch_size;

    pmap_t              pmap = 0;   /* protected by create_map */
    vm_map_t            map;
    vm_map_t            old_map;
    load_result_t       myresult;
    load_return_t       lret;
    boolean_t           create_map = TRUE;

    if (new_map != VM_MAP_NULL) {
        create_map = FALSE;
    }

    if (create_map) {
        old_map = current_map();
#ifdef NO_NESTED_PMAP
        pmap = get_task_pmap(current_task());
        pmap_reference(pmap);
#else   /* NO_NESTED_PMAP */
        pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
#endif  /* NO_NESTED_PMAP */
        map = vm_map_create(pmap,
                0,
                vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
                TRUE);
    } else
        map = new_map;

    if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
        vm_map_disable_NX(map);

    if (!result)
        result = &myresult;

    *result = load_result_null;

    lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
            ((imgp->ip_flags & IMGPF_IS_64BIT) == 0), /* shared regions? */
            clean_regions, 0, result);

    if (lret != LOAD_SUCCESS) {
        vm_map_deallocate(map); /* will lose pmap reference too */
        return (lret);
    }

    /*
     * For 64-bit users, check for presence of a 4GB page zero
     * which will enable the kernel to share the user's address space
     * and hence avoid TLB flushes on kernel entry/exit
     */
    if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
        vm_map_has_4GB_pagezero(map))
        vm_map_set_4GB_pagezero(map);

    /*
     *  Commit to new map.  First make sure that the current
     *  users of the task get done with it, and that we clean
     *  up the old contents of IPC and memory.  The task is
     *  guaranteed to be single threaded upon return (us).
     *
     *  Swap the new map for the old, which consumes our new map
     *  reference but each leaves us responsible for the old_map reference.
     *  That lets us get off the pmap associated with it, and
     *  then we can release it.
     */
    if (create_map) {
        task_halt(current_task());

        old_map = swap_task_map(current_task(), map);
        vm_map_clear_4GB_pagezero(old_map);
#ifndef NO_NESTED_PMAP
        pmap_switch(pmap);  /* Make sure we are using the new pmap */
#endif  /* !NO_NESTED_PMAP */
        vm_map_deallocate(old_map);
    }
    return (LOAD_SUCCESS);
}
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.
 */
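/*
 * The region parsed below is laid out as a mach_header (or mach_header_64)
 * immediately followed by header->ncmds load commands occupying
 * header->sizeofcmds bytes in total; every load command begins with the
 * common { uint32_t cmd; uint32_t cmdsize; } prefix used for validation.
 */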
static
load_return_t
parse_machfile(
    struct vnode        *vp,
    vm_map_t            map,
    thread_t            thr_act,
    struct mach_header  *header,
    off_t               file_offset,
    off_t               macho_size,
    boolean_t           shared_regions,
    boolean_t           clean_regions,
    int                 depth,
    load_result_t       *result
)
{
    uint32_t                ncmds;
    struct load_command     *lcp;
    struct dylinker_command *dlp = 0;
    integer_t               dlarchbits = 0;
    void                    *pager;
    load_return_t           ret = LOAD_SUCCESS;
    caddr_t                 addr;
    void                    *kl_addr;
    vm_size_t               size,kl_size;
    size_t                  offset;
    size_t                  oldoffset;  /* for overflow check */
    int                     pass;
    struct proc             *p = current_proc();       /* XXXX */
    int                     error;
    int                     resid = 0;
    task_t                  task;
    size_t                  mach_header_sz = sizeof(struct mach_header);
    boolean_t               abi64;
    if (header->magic == MH_MAGIC_64 ||
        header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     *  Break infinite recursion
     */
    if (depth > 6)
        return (LOAD_FAILURE);

    task = (task_t)get_threadtask(thr_act);
    /*
     *  Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
        !grade_binary(header->cputype, header->cpusubtype))
        return (LOAD_BADARCH);

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1)
            return (LOAD_FAILURE);
        break;

    case MH_FVMLIB:
    case MH_VMLIB:
        if (depth == 1)
            return (LOAD_FAILURE);
        break;

    case MH_DYLINKER:
        if (depth != 2)
            return (LOAD_FAILURE);
        break;

    default:
        return (LOAD_FAILURE);
    }
    /*
     *  Get the pager for the file.
     */
    UBCINFOCHECK("parse_machfile", vp);
    pager = (void *) ubc_getpager(vp);

    /*
     *  Map portion that must be accessible directly into
     *  kernel's map.
     */
    if ((mach_header_sz + header->sizeofcmds) > macho_size)
        return (LOAD_BADMACHO);

    /*
     *  Round size of Mach-O commands up to page boundary.
     */
    size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0)
        return (LOAD_BADMACHO);
    /*
     *  Map the load commands into kernel memory.
     */
    kl_size = size;
    kl_addr = kalloc(size);
    addr = (caddr_t)kl_addr;
    if (addr == NULL)
        return (LOAD_NOSPACE);

    error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
            UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
    if (error) {
        if (kl_addr)
            kfree(kl_addr, kl_size);
        return (LOAD_IOERROR);
    }
    /* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */
    /*
     *  Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 2; pass++) {
        /*
         *  Loop through each of the load_commands indicated by the
         *  Mach-O header; if an absurd value is provided, we just
         *  run off the end of the reserved section by incrementing
         *  the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             *  Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;
            offset += lcp->cmdsize;

            /*
             *  Perform prevalidation of the struct load_command
             *  before we attempt to use its contents.  Invalid
             *  values are ones which result in an overflow, or
             *  which can not possibly be valid commands, or which
             *  straddle or exist past the reserved section at the
             *  start of the image.
             */
            if (oldoffset > offset ||
                lcp->cmdsize < sizeof(struct load_command) ||
                offset > header->sizeofcmds + mach_header_sz) {
                ret = LOAD_BADMACHO;
                break;
            }
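            /*
             * The command list is walked twice (the enclosing "pass" loop):
             * commands that map memory are meant to be handled before the
             * commands that depend on those mappings, such as thread state
             * and the dynamic linker, are acted upon.
             */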
            /*
             *  Act on struct load_command's for which kernel
             *  intervention is required.
             */
            switch (lcp->cmd) {
            case LC_SEGMENT_64:
                ret = load_segment_64(
                    (struct segment_command_64 *)lcp,
                    /* ... */);
                break;
            case LC_SEGMENT:
                ret = load_segment(
                    (struct segment_command *) lcp,
                    /* ... */);
                break;
            case LC_THREAD:
                ret = load_thread((struct thread_command *)lcp,
                    thr_act, result);
                break;
            case LC_UNIXTHREAD:
                ret = load_unixthread(
                    (struct thread_command *) lcp,
                    thr_act, result);
                break;
            case LC_LOAD_DYLINKER:
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                }
                break;
            default:
                /* Other commands are ignored by the kernel */
                break;
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }

    if (ret == LOAD_SUCCESS) {
        if (shared_regions) {
            vm_offset_t vmaddr;
            shared_region_mapping_t shared_region;
            struct shared_region_task_mappings map_info;
            shared_region_mapping_t next;

            vm_get_shared_region(task, &shared_region);
            map_info.self = (vm_offset_t)shared_region;
            shared_region_mapping_info(shared_region,
                &(map_info.text_region),
                &(map_info.text_size),
                &(map_info.data_region),
                &(map_info.data_size),
                &(map_info.region_mappings),
                &(map_info.client_base),
                &(map_info.alternate_base),
                &(map_info.alternate_next),
                &(map_info.fs_base),
                &(map_info.system),
                &(map_info.flags), &next);
            if ((map_info.flags & SHARED_REGION_FULL) ||
                (map_info.flags & SHARED_REGION_STALE)) {
                shared_region_mapping_t system_region;
                system_region = lookup_default_shared_region(
                    map_info.fs_base, map_info.system);
                if ((map_info.self != (vm_offset_t)system_region) &&
                    (map_info.flags & SHARED_REGION_SYSTEM)) {
                    if (system_region == NULL) {
                        shared_file_boot_time_init(
                            map_info.fs_base, map_info.system);
                    } else {
                        vm_set_shared_region(task, system_region);
                    }
                    shared_region_mapping_dealloc(
                        (shared_region_mapping_t)map_info.self);
                } else if (map_info.flags & SHARED_REGION_SYSTEM) {
                    shared_region_mapping_dealloc(system_region);
                    shared_file_boot_time_init(
                        map_info.fs_base, map_info.system);
                    shared_region_mapping_dealloc(
                        (shared_region_mapping_t)map_info.self);
                } else {
                    shared_region_mapping_dealloc(system_region);
                }
            }
            p->p_flag |= P_NOSHLIB; /* no shlibs in use */
            vmaddr = map_info.client_base;
            if (clean_regions) {
                vm_map(map, &vmaddr, map_info.text_size,
                    0, SHARED_LIB_ALIAS|VM_FLAGS_FIXED,
                    map_info.text_region, 0, FALSE,
                    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            } else {
                vm_map(map, &vmaddr, map_info.text_size, 0,
                    (VM_MEMORY_SHARED_PMAP << 24)
                      | SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
                    map_info.text_region, 0, FALSE,
                    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            }
            vmaddr = map_info.client_base + map_info.text_size;
            vm_map(map, &vmaddr, map_info.data_size,
                0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
                map_info.data_region, 0, TRUE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            while (next) {
                /* this should be fleshed out for the general case */
                /* but this is not necessary for now.  Indeed we   */
                /* are handling the com page inside of the         */
                /* shared_region mapping create calls for now for  */
                /* simplicities sake.  If more general support is  */
                /* needed the code to manipulate the shared range  */
                /* chain can be pulled out and moved to the callers*/
                shared_region_mapping_info(next,
                    &(map_info.text_region),
                    &(map_info.text_size),
                    &(map_info.data_region),
                    &(map_info.data_size),
                    &(map_info.region_mappings),
                    &(map_info.client_base),
                    &(map_info.alternate_base),
                    &(map_info.alternate_next),
                    &(map_info.fs_base),
                    &(map_info.system),
                    &(map_info.flags), &next);

                vmaddr = map_info.client_base;
                vm_map(map, &vmaddr, map_info.text_size,
                    0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
                    map_info.text_region, 0, FALSE,
                    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            }
        }
        if (dlp != 0)
            ret = load_dylinker(dlp, dlarchbits, map, thr_act,
                    depth, result, clean_regions, abi64);

        if (depth == 1) {
            if (result->thread_count == 0)
                ret = LOAD_FAILURE;
            else if (abi64) {
                /* Map in 64-bit commpage */
                /* LP64todo - make this clean */
                pmap_map_sharedpage(current_task(), get_map_pmap(map));
                vm_map_commpage64(map);
            }
#ifdef __i386__
            else {
                /*
                 * On Intel, the comm page doesn't get mapped
                 * automatically because it goes beyond the current end
                 * of the VM map in the current 3GB/1GB address space
                 * model.
                 * XXX This will probably become unnecessary when we
                 * switch to the 4GB/4GB address space model.
                 */
                vm_map_commpage32(map);
            }
#endif /* __i386__ */
        }
    }
    if (kl_addr)
        kfree(kl_addr, kl_size);

    if (ret == LOAD_SUCCESS)
        (void)ubc_map(vp, PROT_EXEC);

    return (ret);
}
#ifndef SG_PROTECTED_VERSION_1
#define SG_PROTECTED_VERSION_1 0x8
#endif /* SG_PROTECTED_VERSION_1 */

#ifdef __i386__

#define APPLE_UNPROTECTED_HEADER_SIZE (3 * PAGE_SIZE_64)
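/*
 * Segments flagged SG_PROTECTED_VERSION_1 carry "protected" contents.  The
 * first APPLE_UNPROTECTED_HEADER_SIZE bytes of that part of the file are
 * left untouched (presumably so the mach_header and load commands remain
 * directly readable); everything beyond that point must be transformed via
 * vm_map_apple_protected() before it is usable, which is what
 * unprotect_segment_64() below arranges.
 */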
static load_return_t
unprotect_segment_64(
    uint64_t        file_off,
    uint64_t        file_size,
    vm_map_t        map,
    vm_map_offset_t map_addr,
    vm_map_size_t   map_size)
{
    kern_return_t   kr;

    /*
     * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
     * this part of a Universal binary) are not protected...
     * The rest needs to be "transformed".
     */
    if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
        file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
        /* it's all unprotected, nothing to do... */
        kr = KERN_SUCCESS;
    } else {
        if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
            /*
             * We start mapping in the unprotected area.
             * Skip the unprotected part...
             */
            vm_map_offset_t delta;

            delta = APPLE_UNPROTECTED_HEADER_SIZE;
            delta -= file_off;
            map_addr += delta;
            map_size -= delta;
        }
        /* ... transform the rest of the mapping. */
        kr = vm_map_apple_protected(map,
                        map_addr,
                        map_addr + map_size);
    }

    if (kr != KERN_SUCCESS) {
        return LOAD_FAILURE;
    }
    return LOAD_SUCCESS;
}
#else /* __i386__ */
#define unprotect_segment_64(file_off, file_size, map, map_addr, map_size) \
    LOAD_SUCCESS
#endif /* __i386__ */
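/*
 * On configurations without the transformation support above, the macro
 * variant of unprotect_segment_64() simply evaluates to LOAD_SUCCESS, so
 * load_segment() and load_segment_64() can invoke it unconditionally.
 */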
static
load_return_t
load_segment(
    struct segment_command  *scp,
    void                    *pager,
    off_t                   pager_offset,
    off_t                   macho_size,
    __unused off_t          end_of_file,
    vm_map_t                map,
    load_result_t           *result
)
{
    kern_return_t       ret;
    vm_offset_t         map_addr, map_offset;
    vm_size_t           map_size, seg_size, delta_size;
    vm_prot_t           initprot;
    vm_prot_t           maxprot;

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp->fileoff + scp->filesize > macho_size)
        return (LOAD_BADMACHO);

    seg_size = round_page(scp->vmsize);
    if (seg_size == 0)
        return (KERN_SUCCESS);

    /*
     *  Round sizes to page size.
     */
    map_size = round_page(scp->filesize);
    map_addr = trunc_page(scp->vmaddr);

#if 0 /* XXX (4596982) this interferes with Rosetta */
    if (map_addr == 0 &&
        map_size == 0 &&
        seg_size != 0 &&
        (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
        (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
        /*
         * This is a "page zero" segment:  it starts at address 0,
         * is not mapped from the binary file and is not accessible.
         * User-space should never be able to access that memory, so
         * make it completely off limits by raising the VM map's
         * minimum offset.
         */
        ret = vm_map_raise_min_offset(map, (vm_map_offset_t) seg_size);
        if (ret != KERN_SUCCESS) {
            return LOAD_FAILURE;
        }
        return LOAD_SUCCESS;
    }
#endif

    map_offset = pager_offset + scp->fileoff;

    initprot = (scp->initprot) & VM_PROT_ALL;
    maxprot = (scp->maxprot) & VM_PROT_ALL;
    /*
     *  Map a copy of the file into the address space.
     */
    ret = vm_map(map,
            &map_addr, map_size, (vm_offset_t)0,
            VM_FLAGS_FIXED, pager, map_offset, TRUE,
            initprot, maxprot,
            VM_INHERIT_DEFAULT);
    if (ret != KERN_SUCCESS)
        return (LOAD_NOSPACE);

    /*
     *  If the file didn't end on a page boundary,
     *  we need to zero the leftover.
     */
    delta_size = map_size - scp->filesize;
    if (delta_size > 0) {
        vm_offset_t tmp;

        ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
        if (ret != KERN_SUCCESS)
            return (LOAD_RESOURCE);

        if (copyout(tmp, map_addr + scp->filesize,
                delta_size)) {
            (void) vm_deallocate(
                    kernel_map, tmp, delta_size);
            return (LOAD_FAILURE);
        }

        (void) vm_deallocate(kernel_map, tmp, delta_size);
    }

    /*
     *  If the virtual size of the segment is greater
     *  than the size from the file, we need to allocate
     *  zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        vm_offset_t tmp = map_addr + map_size;

        ret = vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
                NULL, 0, FALSE,
                scp->initprot, scp->maxprot,
                VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return (LOAD_NOSPACE);
    }

    if ( (scp->fileoff == 0) && (scp->filesize != 0) )
        result->mach_header = map_addr;

    if (scp->flags & SG_PROTECTED_VERSION_1) {
        ret = unprotect_segment_64((uint64_t) scp->fileoff,
                       (uint64_t) scp->filesize,
                       map,
                       (vm_map_offset_t) map_addr,
                       (vm_map_size_t) map_size);
    } else {
        ret = LOAD_SUCCESS;
    }

    return ret;
}
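/*
 * load_segment_64() below mirrors load_segment() above, using the
 * mach_vm_map()/mach_vm_offset_t interfaces so that LC_SEGMENT_64 commands
 * from 64-bit images can describe mappings beyond a 32-bit address space.
 */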
static
load_return_t
load_segment_64(
    struct segment_command_64   *scp64,
    void                        *pager,
    off_t                       pager_offset,
    off_t                       macho_size,
    __unused off_t              end_of_file,
    vm_map_t                    map,
    load_result_t               *result
)
{
    kern_return_t       ret;
    mach_vm_offset_t    map_addr, map_offset;
    mach_vm_size_t      map_size, seg_size, delta_size;
    vm_prot_t           initprot;
    vm_prot_t           maxprot;

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
        return (LOAD_BADMACHO);

    seg_size = round_page_64(scp64->vmsize);
    if (seg_size == 0)
        return (KERN_SUCCESS);

    /*
     *  Round sizes to page size.
     */
    map_size = round_page_64(scp64->filesize); /* limited to 32 bits */
    map_addr = round_page_64(scp64->vmaddr);

    if (map_addr == 0 &&
        map_size == 0 &&
        seg_size != 0 &&
        (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
        (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
        /*
         * This is a "page zero" segment:  it starts at address 0,
         * is not mapped from the binary file and is not accessible.
         * User-space should never be able to access that memory, so
         * make it completely off limits by raising the VM map's
         * minimum offset.
         */
        ret = vm_map_raise_min_offset(map, seg_size);
        if (ret != KERN_SUCCESS) {
            return LOAD_FAILURE;
        }
        return LOAD_SUCCESS;
    }

    map_offset = pager_offset + scp64->fileoff; /* limited to 32 bits */

    initprot = (scp64->initprot) & VM_PROT_ALL;
    maxprot = (scp64->maxprot) & VM_PROT_ALL;
    /*
     *  Map a copy of the file into the address space.
     */
    ret = mach_vm_map(map,
            &map_addr, map_size, (mach_vm_offset_t)0,
            VM_FLAGS_FIXED, pager, map_offset, TRUE,
            initprot, maxprot,
            VM_INHERIT_DEFAULT);
    if (ret != KERN_SUCCESS)
        return (LOAD_NOSPACE);

    /*
     *  If the file didn't end on a page boundary,
     *  we need to zero the leftover.
     */
    delta_size = map_size - scp64->filesize;
    if (delta_size > 0) {
        mach_vm_offset_t tmp;

        ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
        if (ret != KERN_SUCCESS)
            return (LOAD_RESOURCE);

        if (copyout(tmp, map_addr + scp64->filesize,
                delta_size)) {
            (void) vm_deallocate(
                    kernel_map, tmp, delta_size);
            return (LOAD_FAILURE);
        }

        (void) vm_deallocate(kernel_map, tmp, delta_size);
    }

    /*
     *  If the virtual size of the segment is greater
     *  than the size from the file, we need to allocate
     *  zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        mach_vm_offset_t tmp = map_addr + map_size;

        ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
                NULL, 0, FALSE,
                scp64->initprot, scp64->maxprot,
                VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return (LOAD_NOSPACE);
    }

    if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
        result->mach_header = map_addr;

    if (scp64->flags & SG_PROTECTED_VERSION_1) {
        ret = unprotect_segment_64(scp64->fileoff,
                       scp64->filesize,
                       map,
                       map_addr,
                       map_size);
    } else {
        ret = LOAD_SUCCESS;
    }

    return ret;
}
static
load_return_t
load_thread(
    struct thread_command   *tcp,
    thread_t                thread,
    load_result_t           *result
)
{
    kern_return_t   kret;
    load_return_t   lret;
    task_t          task;
    int             customstack = 0;

    task = get_threadtask(thread);

    /* if count is 0; same as thr_act */
    if (result->thread_count != 0) {
        kret = thread_create(task, &thread);
        if (kret != KERN_SUCCESS)
            return (LOAD_RESOURCE);
        thread_deallocate(thread);
    }

    lret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (lret != LOAD_SUCCESS)
        return (lret);

    if (result->thread_count == 0) {
        lret = load_threadstack(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->user_stack,
                &customstack);
        if (customstack)
            result->customstack = 1;
        else
            result->customstack = 0;

        if (lret != LOAD_SUCCESS)
            return (lret);

        lret = load_threadentry(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->entry_point);
        if (lret != LOAD_SUCCESS)
            return (lret);
    }
    /*
     *  Resume thread now, note that this means that the thread
     *  commands should appear after all the load commands to
     *  be sure they don't reference anything not yet mapped.
     */
    else
        thread_resume(thread);

    result->thread_count++;

    return (LOAD_SUCCESS);
}
static
load_return_t
load_unixthread(
    struct thread_command   *tcp,
    thread_t                thread,
    load_result_t           *result
)
{
    load_return_t   ret;
    int             customstack = 0;

    if (result->thread_count != 0)
        return (LOAD_FAILURE);

    ret = load_threadstack(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->user_stack,
            &customstack);
    if (ret != LOAD_SUCCESS)
        return (ret);

    if (customstack)
        result->customstack = 1;
    else
        result->customstack = 0;

    ret = load_threadentry(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->entry_point);
    if (ret != LOAD_SUCCESS)
        return (ret);

    ret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (ret != LOAD_SUCCESS)
        return (ret);

    result->unixproc = TRUE;
    result->thread_count++;

    return (LOAD_SUCCESS);
}
static
load_return_t
load_threadstate(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size
)
{
    kern_return_t   ret;
    unsigned long   size;
    int             flavor;
    unsigned long   thread_size;

    ret = thread_state_initialize( thread );
    if (ret != KERN_SUCCESS)
        return (LOAD_FAILURE);

    /*
     *  Set the new thread state; iterate through the state flavors in
     *  the mach-o file.
     */
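    /*
     * The thread command payload is a sequence of variable-length records,
     * each laid out as:
     *
     *    unsigned long flavor;        -- thread state flavor
     *    unsigned long count;         -- length of the state, in longs
     *    unsigned long state[count];  -- machine-dependent register state
     *
     * which is why each iteration below consumes (size + 2) longs.
     */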
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        thread_size = (size+2)*sizeof(unsigned long);
        if (thread_size > total_size)
            return (LOAD_BADMACHO);
        total_size -= thread_size;
        /*
         * Third argument is a kernel space pointer; it gets cast
         * to the appropriate type in machine_thread_set_state()
         * based on the value of flavor.
         */
        ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
        if (ret != KERN_SUCCESS)
            return (LOAD_FAILURE);
        ts += size; /* ts is a (unsigned long *) */
    }
    return (LOAD_SUCCESS);
}
static
load_return_t
load_threadstack(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size,
    user_addr_t     *user_stack,
    int             *customstack
)
{
    kern_return_t   ret;
    unsigned long   size;
    int             flavor;
    unsigned long   stack_size;

    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        stack_size = (size+2)*sizeof(unsigned long);
        if (stack_size > total_size)
            return (LOAD_BADMACHO);
        total_size -= stack_size;
        /*
         * Third argument is a kernel space pointer; it gets cast
         * to the appropriate type in thread_userstack() based on
         * the value of flavor.
         */
        ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
        if (ret != KERN_SUCCESS)
            return (LOAD_FAILURE);
        ts += size; /* ts is a (unsigned long *) */
    }
    return (LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
    thread_t            thread,
    unsigned long       *ts,
    unsigned long       total_size,
    mach_vm_offset_t    *entry_point
)
{
    kern_return_t   ret;
    unsigned long   size;
    int             flavor;
    unsigned long   entry_size;

    /*
     *  Set the thread state.
     */
    *entry_point = MACH_VM_MIN_ADDRESS;
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        entry_size = (size+2)*sizeof(unsigned long);
        if (entry_size > total_size)
            return (LOAD_BADMACHO);
        total_size -= entry_size;
        /*
         * Third argument is a kernel space pointer; it gets cast
         * to the appropriate type in thread_entrypoint() based on
         * the value of flavor.
         */
        ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
        if (ret != KERN_SUCCESS)
            return (LOAD_FAILURE);
        ts += size; /* ts is a (unsigned long *) */
    }
    return (LOAD_SUCCESS);
}
static
load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    integer_t               archbits,
    vm_map_t                map,
    thread_t                thr_act,
    int                     depth,
    load_result_t           *result,
    boolean_t               clean_regions,
    boolean_t               is_64bit
)
{
    char                *name;
    char                *p;
    struct vnode        *vp;
    struct mach_header  header;
    off_t               file_offset;
    off_t               macho_size;
    vm_map_t            copy_map;
    load_result_t       myresult;
    kern_return_t       ret;
    vm_map_copy_t       tmp;
    mach_vm_offset_t    dyl_start, map_addr;
    mach_vm_size_t      dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *  Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return (LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    /*
     *  Use a temporary map to do the work.
     */
    copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
                        is_64bit),
                get_map_min(map), get_map_max(map), TRUE);
    if (VM_MAP_NULL == copy_map) {
        ret = LOAD_RESOURCE;
        goto out;
    }

    myresult = load_result_null;

    ret = parse_machfile(vp, copy_map, thr_act, &header,
                file_offset, macho_size,
                FALSE, clean_regions, depth, &myresult);

    if (ret)
        goto out;

    if (get_map_nentries(copy_map) > 0) {

        dyl_start = mach_get_vm_start(copy_map);
        dyl_length = mach_get_vm_end(copy_map) - dyl_start;

        map_addr = dyl_start;
        ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_FIXED);
        if (ret != KERN_SUCCESS) {
            ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
        }

        if (ret != KERN_SUCCESS) {
            ret = LOAD_NOSPACE;
            goto out;
        }

        ret = vm_map_copyin(copy_map,
                    (vm_map_address_t)dyl_start,
                    (vm_map_size_t)dyl_length,
                    TRUE, &tmp);
        if (ret != KERN_SUCCESS) {
            (void) vm_map_remove(map,
                    vm_map_trunc_page(map_addr),
                    vm_map_round_page(map_addr + dyl_length),
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        ret = vm_map_copy_overwrite(map,
                    (vm_map_address_t)map_addr,
                    tmp, FALSE);
        if (ret != KERN_SUCCESS) {
            vm_map_copy_discard(tmp);
            (void) vm_map_remove(map,
                    vm_map_trunc_page(map_addr),
                    vm_map_round_page(map_addr + dyl_length),
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        if (map_addr != dyl_start)
            myresult.entry_point += (map_addr - dyl_start);
    } else {
        ret = LOAD_FAILURE;
    }

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_EXEC);
    }
out:
    vm_map_deallocate(copy_map);

    vnode_put(vp);
    return (ret);
}
/*
 * This routine exists to support the load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
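/*
 * For reference: a fat (Universal) file starts with a big-endian
 * struct fat_header { uint32_t magic; uint32_t nfat_arch; } followed by
 * nfat_arch struct fat_arch entries, each recording the cputype/cpusubtype
 * and the file offset/size of one thin Mach-O image; the entry matching the
 * requested architecture is the one loaded below.
 */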
static
load_return_t
get_macho_vnode(
    char                *path,
    integer_t           archbits,
    struct mach_header  *mach_header,
    off_t               *file_offset,
    off_t               *macho_size,
    struct vnode        **vpp
)
{
    struct vnode        *vp;
    struct vfs_context  context;
    struct nameidata    nid, *ndp;
    struct proc         *p = current_proc();  /* XXXX */
    boolean_t           is_fat;
    struct fat_arch     fat_arch;
    int                 error = LOAD_SUCCESS;
    int                 resid;
    union {
        struct mach_header  mach_header;
        struct fat_header   fat_header;
        char                pad[512];
    } header;
    off_t               fsize = (off_t)0;
    struct ucred        *cred = kauth_cred_get();
    int                 err2;

    context.vc_proc = p;
    context.vc_ucred = cred;

    ndp = &nid;

    /* init the namei data to point the file user's program name */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(path), &context);

    if ((error = namei(ndp)) != 0) {
        if (error == ENOENT)
            error = LOAD_ENOENT;
        else
            error = LOAD_FAILURE;
        return (error);
    }
    nameidone(ndp);
    vp = ndp->ni_vp;

    /* check for regular file */
    if (vp->v_type != VREG) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* get size */
    if ((error = vnode_size(vp, &fsize, &context)) != 0) {
        error = LOAD_FAILURE;
        goto bad1;
    }

    /* Check mount point */
    if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* check access */
    if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, &context)) != 0) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* try to open it */
    if ((error = VNOP_OPEN(vp, FREAD, &context)) != 0) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
        UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p)) != 0) {
        error = LOAD_IOERROR;
        goto bad2;
    }

    if (header.mach_header.magic == MH_MAGIC ||
        header.mach_header.magic == MH_MAGIC_64)
        is_fat = FALSE;
    else if (header.fat_header.magic == FAT_MAGIC ||
        header.fat_header.magic == FAT_CIGAM)
        is_fat = TRUE;
    else {
        error = LOAD_BADMACHO;
        goto bad2;
    }

    if (is_fat) {
        /* Look up our architecture in the fat file. */
        error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
        if (error != LOAD_SUCCESS)
            goto bad2;

        /* Read the Mach-O header out of it */
        error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
                sizeof(header.mach_header), fat_arch.offset,
                UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p);
        if (error) {
            error = LOAD_IOERROR;
            goto bad2;
        }

        /* Is this really a Mach-O? */
        if (header.mach_header.magic != MH_MAGIC &&
            header.mach_header.magic != MH_MAGIC_64) {
            error = LOAD_BADMACHO;
            goto bad2;
        }

        *file_offset = fat_arch.offset;
        *macho_size = fsize = fat_arch.size;
    } else {
        /*
         * Force get_macho_vnode() to fail if the architecture bits
         * do not match the expected architecture bits.  This in
         * turn causes load_dylinker() to fail for the same reason,
         * so it ensures the dynamic linker and the binary are in
         * lock-step.  This is potentially bad, if we ever add to
         * the CPU_ARCH_* bits any bits that are desirable but not
         * required, since the dynamic linker might work, but we will
         * refuse to load it because of this check.
         */
        if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits)
            return (LOAD_BADARCH);

        *file_offset = 0;
        *macho_size = fsize;
    }

    *mach_header = header.mach_header;
    *vpp = vp;

    ubc_setsize(vp, fsize);

    return (error);

bad2:
    err2 = VNOP_CLOSE(vp, FREAD, &context);
    vnode_put(vp);
    return (error);

bad1:
    vnode_put(vp);
    return (error);
}