/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * HISTORY
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/vm_statistics.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <machine/vmparam.h>
/*
 * Prototypes of static functions.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result
),
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
),
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
),
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
),
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
),
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*user_stack,
	boolean_t		*customstack
),
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*entry_point
),
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result
),
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
);
load_return_t
load_machfile(
	struct vnode		*vp,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	load_result_t		*result,
	thread_act_t		thr_act,
	vm_map_t		new_map
)
{
	pmap_t			pmap;
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;
	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef i386
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else
		pmap = pmap_create((vm_size_t) 0);
#endif
		map = vm_map_create(pmap,
				get_map_min(old_map),
				get_map_max(old_map),
				TRUE); /**** FIXME ****/
	} else
		map = new_map;

	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };
	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			     0/*depth*/, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map)
			vm_map_deallocate(map);	/* will lose pmap reference too */
		return(lret);
	}
	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old at the task level and at
	 *	our activation.  The latter consumes our new map reference
	 *	but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_deallocate(old_map);

		old_map = swap_act_map(current_act(), map);

#ifndef i386
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
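
/*
 * Usage sketch (hypothetical caller, not part of this file): the BSD
 * exec path would invoke load_machfile() roughly as below, after it has
 * read the Mach-O header and located the file.  The local names and the
 * error mapping are illustrative assumptions only.
 *
 *	load_result_t	load_result;
 *	load_return_t	lret;
 *
 *	lret = load_machfile(vp, &mach_header, file_offset, macho_size,
 *			     &load_result, thr_act, VM_MAP_NULL);
 *	if (lret != LOAD_SUCCESS)
 *		return (EBADEXEC);
 */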
int dylink_test = 1;
extern vm_offset_t	system_shared_region;
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result
)
{
	struct machine_slot	*ms;
	int			ncmds;
	struct load_command	*lcp, *next;
	struct dylinker_command	*dlp = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	vm_offset_t		addr, kl_addr;
	vm_size_t		size, kl_size;
	int			offset;
	int			pass;
	struct proc		*p = current_proc();	/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;
	/*
	 *	Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);

	task = (task_t)get_threadtask(thr_act);

	depth++;
	/*
	 *	Check to see if the machine type is right.
	 */
	ms = &machine_slot[cpu_number()];
	if ((header->cputype != ms->cpu_type) ||
	    !check_cpu_subtype(header->cpusubtype))
		return(LOAD_BADARCH);

	/*
	 *	Constrain the file type by the depth at which it was found.
	 */
	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}
	/*
	 *	Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(sizeof (struct mach_header) + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);
	/*
	 *	Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	if(error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* ubc_map(vp); */ /* NOT HERE */
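
	/*
	 * The command area is walked twice below.  Pass 1 handles only
	 * LC_SEGMENT, so all of the image's memory is mapped first; pass 2
	 * handles the thread and dylinker commands, which may reference
	 * that freshly mapped memory.
	 */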
	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		offset = sizeof(struct mach_header);
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			offset += lcp->cmdsize;

			/*
			 *	Check for valid lcp pointer by checking
			 *	next offset.
			 */
			if (offset > header->sizeofcmds
					+ sizeof(struct mach_header)) {
				if (kl_addr)
					kfree(kl_addr, kl_size);
				return(LOAD_BADMACHO);
			}
353 (struct segment_command
*) lcp
,
356 (unsigned long)ubc_getsize(vp
),
363 ret
= load_thread((struct thread_command
*)lcp
, thr_act
,
369 ret
= load_unixthread(
370 (struct thread_command
*) lcp
, thr_act
,
373 case LC_LOAD_DYLINKER
:
376 if (depth
== 1 || dlp
== 0)
377 dlp
= (struct dylinker_command
*)lcp
;
382 ret
= KERN_SUCCESS
;/* ignore other stuff */
384 if (ret
!= LOAD_SUCCESS
)
387 if (ret
!= LOAD_SUCCESS
)
	if (ret == LOAD_SUCCESS && dlp != 0) {
		vm_offset_t addr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings	map_info;
		shared_region_mapping_t next;

RedoLookup:
		vm_get_shared_region(task, &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.flags), &next);

		if((map_info.self != (vm_offset_t)system_shared_region) &&
			(map_info.flags & SHARED_REGION_SYSTEM)) {
			shared_region_mapping_ref(system_shared_region);
			vm_set_shared_region(task, system_shared_region);
			shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			goto RedoLookup;
		}

		if (dylink_test) {
			p->p_flag |= P_NOSHLIB;	/* no shlibs in use */
			addr = map_info.client_base;
			vm_map(map, &addr, map_info.text_size, 0,
				(VM_MEMORY_SHARED_PMAP << 24)
						| SHARED_LIB_ALIAS,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			addr = map_info.client_base + map_info.text_size;
			vm_map(map, &addr, map_info.data_size,
				0, SHARED_LIB_ALIAS,
				map_info.data_region, 0, TRUE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
		ret = load_dylinker(dlp, map, thr_act, depth, result);
	}
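
	/*
	 * Layout of the shared region set up above: the split-library text
	 * region is mapped read-only at map_info.client_base, with the data
	 * region immediately after it, both VM_INHERIT_SHARE so every task
	 * sees the same system libraries:
	 *
	 *	client_base               client_base + text_size
	 *	|------ text_region ------|------ data_region ------|
	 */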
	if (kl_addr)
		kfree(kl_addr, kl_size);

	if ((ret == LOAD_SUCCESS) && (depth == 1) &&
				(result->thread_count == 0))
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS)
		ubc_map(vp);

	return(ret);
}
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;
#if 1
	extern int print_map_addr;
#endif /* 1 */
	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
				&map_addr, map_size, (vm_offset_t)0, FALSE,
				pager, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

#if 0
		printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
#endif /* 0 */
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}
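
	/*
	 * Worked example for the zero-fill above (illustrative numbers):
	 * with 4K pages, filesize 0x1234 gives map_size = round_page(0x1234)
	 * = 0x2000, so delta_size = 0x2000 - 0x1234 = 0xdcc bytes of the
	 * partial last page would be zeroed rather than leak file bytes.
	 */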
	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_allocate(map, &tmp, delta_size, FALSE);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}
	/*
	 *	Set protection values. (Note: ignore errors!)
	 */

	if (scp->maxprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
					map_addr, seg_size,
					TRUE, scp->maxprot);
	}
	if (scp->initprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
					map_addr, seg_size,
					FALSE, scp->initprot);
	}
	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;
	return(LOAD_SUCCESS);
}
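
/*
 * Illustrative LC_SEGMENT case (hypothetical values): a __DATA segment
 * with vmaddr 0x2000, vmsize 0x3000, fileoff 0x1000, filesize 0x1000 is
 * handled above as: vm_map() 0x1000 bytes of the file at 0x2000, then
 * vm_allocate() the remaining 0x2000 bytes at 0x3000 as zero fill, and
 * finally apply initprot/maxprot to the whole range.
 */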
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
)
{
	thread_t	thread = current_thread();
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);
	thread = getshuttle_thread(thr_act);
	ret = load_threadstack(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->user_stack, &customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;
	ret = load_threadentry(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
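
/*
 * On the customstack flag: thread_userstack() reports whether the thread
 * state carries its own stack pointer; result->customstack presumably
 * lets the exec path skip building the default user stack in that case
 * (an inference from this file alone; the caller's code may differ).
 */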
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	thread_t	thread;
	int		customstack = 0;

	task = get_threadtask(thr_act);
	thread = getshuttle_thread(thr_act);
	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack, &customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
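
/*
 * Note the asymmetry with load_unixthread() above: LC_UNIXTHREAD marks
 * the single main thread of a Unix process and only records its stack
 * and entry point in the load result, while LC_THREAD may create extra
 * threads and resumes each one once its register state has been set.
 */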
static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)	/* total_size is unsigned; catch underflow */
			return(LOAD_BADMACHO);
		ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
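
/*
 * Shape of the data parsed above (values illustrative): the thread
 * command payload is a sequence of (flavor, count, state...) records,
 * e.g. a record with 4 words of register state looks like
 *
 *	ts[0] = flavor;
 *	ts[1] = 4;		count, in unsigned longs
 *	ts[2..5]		state handed to thread_setstatus()
 *
 * and total_size must cover the records exactly, else LOAD_BADMACHO.
 */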
static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*user_stack,
	boolean_t	*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)	/* total_size is unsigned; catch underflow */
			return(LOAD_BADMACHO);
		*user_stack = USRSTACK;
		ret = thread_userstack(thread, flavor, ts, size,
				user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	*entry_point = 0;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)	/* total_size is unsigned; catch underflow */
			return(LOAD_BADMACHO);
		ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	vm_offset_t		dyl_start, map_addr;
	vm_size_t		dyl_length;
	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };
	/*
	 *	Load the Mach-O.
	 */

	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max(map), TRUE);

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				depth, &myresult);

	if (ret)
		goto out;
	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);

}
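
/*
 * Slide example (illustrative addresses): if dyld's segments prefer
 * dyl_start 0x41100000 but that range is occupied, the anywhere-fit
 * vm_allocate() above may return map_addr 0x8fe00000; the recorded
 * entry point is then adjusted by (map_addr - dyl_start) so it remains
 * correct at the slid location.
 */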
static
load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr		attr, *atp;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = KERN_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;
	off_t			fsize = (off_t)0;
	struct ucred		*cred = p->p_ucred;
	ndp = &nid;
	atp = &attr;

	/* init the namei data to point the file user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

	if (error = namei(ndp))
		return(error);

	vp = ndp->ni_vp;
	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if (error = VOP_GETATTR(vp, &attr, cred, p))
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if (error = VOP_ACCESS(vp, VEXEC, cred, p))
		goto bad1;
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}
	/* hold the vnode for the IO */
	if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
		error = ENOENT;
		goto bad1;
	}

	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, cred, p)) {
		ubc_rele(vp);
		goto bad1;
	}

	if(error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))
		goto bad2;
	if (header.mach_header.magic == MH_MAGIC)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}
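
	/*
	 * A fat (universal) file begins with a struct fat_header whose
	 * magic is FAT_MAGIC (0xcafebabe), stored big-endian on disk; on a
	 * little-endian host the raw read above may see the byte-swapped
	 * FAT_CIGAM instead.  The header is followed by an array of struct
	 * fat_arch records, one per embedded architecture.
	 */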
	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_FAILURE;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {

		*file_offset = 0;
		*macho_size = fsize = attr.va_size;
	}
	*mach_header = header.mach_header;
	*vpp = vp;
	if (UBCISVALID(vp))
		ubc_setsize(vp, fsize);	/* XXX why? */

	VOP_UNLOCK(vp, 0, p);
	ubc_rele(vp);
	return (error);

bad2:
	VOP_UNLOCK(vp, 0, p);
	error = VOP_CLOSE(vp, FREAD, cred, p);
	ubc_rele(vp);
	vrele(vp);
	return (error);

bad1:
	vput(vp);
	return(error);
}