/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989, NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>
#include <kern/task.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/shared_memory_server.h>
#include <mach/vm_statistics.h>
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result
);

static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
);

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
);

static load_return_t
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*user_stack,
	int			*customstack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*entry_point
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result
);

static load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
);
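/*
 * load_machfile() is the top-level entry point: it parses the Mach-O
 * image backing vnode `vp', builds up a fresh address map (or fills in
 * `new_map' when the caller supplies one), and on success installs the
 * new map as the current task's map.
 */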
load_return_t
load_machfile(
	struct vnode		*vp,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	load_result_t		*result,
	thread_act_t		thr_act,
	vm_map_t		new_map
)
{
	pmap_t			pmap;
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;

	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef i386
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else
		pmap = pmap_create((vm_size_t) 0);
#endif
		map = vm_map_create(pmap,
				get_map_min(old_map),
				get_map_max(old_map),
				TRUE); /**** FIXME ****/
	} else
		map = new_map;

	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			      0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map)
			vm_map_deallocate(map);	/* will lose pmap reference too */
		return(lret);
	}

	/*
	 * Commit to new map.  First make sure that the current
	 * users of the task get done with it, and that we clean
	 * up the old contents of IPC and memory.  The task is
	 * guaranteed to be single threaded upon return (us).
	 *
	 * Swap the new map for the old at the task level and at
	 * our activation.  The latter consumes our new map reference
	 * but each leaves us responsible for the old_map reference.
	 * That lets us get off the pmap associated with it, and
	 * then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_deallocate(old_map);

		old_map = swap_act_map(current_act(), map);

#ifndef i386
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
extern vm_offset_t	system_shared_region;
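/*
 * parse_machfile() does the real work: it walks the load commands in
 * two passes (segments on pass 1, threads on pass 2), maps each
 * LC_SEGMENT through the vnode's pager, and finally hands any
 * LC_LOAD_DYLINKER command off to load_dylinker().  `depth' guards
 * against runaway recursion through load_dylinker().
 */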
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result
)
{
	struct machine_slot	*ms;
	int			ncmds;
	struct load_command	*lcp, *next;
	struct dylinker_command	*dlp = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	vm_offset_t		addr, kl_addr;
	vm_size_t		size, kl_size;
	int			offset;
	int			pass;
	struct proc		*p = current_proc();		/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);
	depth++;

	task = (task_t)get_threadtask(thr_act);

	/*
	 *	Check to see if right machine type.
	 */
	ms = &machine_slot[cpu_number()];
	if ((header->cputype != ms->cpu_type) ||
	    !check_cpu_subtype(header->cpusubtype))
		return(LOAD_BADARCH);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(sizeof (struct mach_header) + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 *	Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	if(error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_FAILURE);
	}
	/* ubc_map(vp); */ /* NOT HERE */

	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		offset = sizeof(struct mach_header);
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			offset += lcp->cmdsize;

			/*
			 *	Check for valid lcp pointer by checking
			 *	next offset.
			 */
			if (offset > header->sizeofcmds
					+ sizeof(struct mach_header)) {
				if (kl_addr)
					kfree(kl_addr, kl_size);
				return(LOAD_BADMACHO);
			}

			/*
			 *	Check for valid command.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					       (struct segment_command *) lcp,
						   pager, file_offset,
						   macho_size,
						   (unsigned long)ubc_getsize(vp),
						   map,
						   result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp, thr_act,
						  result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp, thr_act,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				/* remember the dylinker command for after the scan */
				if (depth == 1 || dlp == 0)
					dlp = (struct dylinker_command *)lcp;
				else
					ret = LOAD_FAILURE;
				break;
			default:
				ret = KERN_SUCCESS;	/* ignore other stuff */
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS && dlp != 0) {
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings map_info;
		shared_region_mapping_t next;

		vm_get_shared_region(task, &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.flags), &next);

		if((map_info.flags & SHARED_REGION_FULL) &&
			(map_info.flags & SHARED_REGION_SYSTEM)) {
			if(map_info.self != (vm_offset_t)system_shared_region) {
				shared_region_mapping_ref(system_shared_region);
				vm_set_shared_region(task,
					system_shared_region);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			}
		}

		addr = map_info.client_base;
		vm_map(map, &addr, map_info.text_size, 0,
			(VM_MEMORY_SHARED_PMAP << 24)
				| SHARED_LIB_ALIAS,
			map_info.text_region, 0, FALSE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		addr = map_info.client_base + map_info.text_size;
		vm_map(map, &addr, map_info.data_size,
			0, SHARED_LIB_ALIAS,
			map_info.data_region, 0, TRUE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

		ret = load_dylinker(dlp, map, thr_act, depth, result);
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	if ((ret == LOAD_SUCCESS) && (depth == 1) &&
				(result->thread_count == 0))
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS)
		ubc_map(vp);

	return(ret);
}
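/*
 * load_segment() maps one LC_SEGMENT command: the file-backed portion
 * comes in through the vnode pager, any remainder of the segment's
 * vmsize is zero-filled, and the segment's initial/maximum protections
 * are applied to the mapped range.
 */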
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;
	extern int		print_map_addr;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
				&map_addr, map_size, (vm_offset_t)0, FALSE,
				pager, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		if (print_map_addr)
			printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);

		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_allocate(map, &tmp, delta_size, FALSE);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	/*
	 *	Set protection values. (Note: ignore errors!)
	 */
	if (scp->maxprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  TRUE, scp->maxprot);
	}
	if (scp->initprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  FALSE, scp->initprot);
	}
	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;
	return(LOAD_SUCCESS);
}
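/*
 * load_unixthread() handles LC_UNIXTHREAD: exactly one is allowed, and
 * it supplies the stack, entry point, and register state for the
 * process's initial thread.
 */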
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
)
{
	thread_t	thread = current_thread();
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	thread = getshuttle_thread(thr_act);
	ret = load_threadstack(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->user_stack,
		       &customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;
	ret = load_threadentry(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
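/*
 * load_thread() handles LC_THREAD: the first one reuses the current
 * activation; each additional command creates a fresh thread in the
 * task and resumes it once its state has been set.
 */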
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	thread_t	thread;
	int		customstack = 0;

	task = get_threadtask(thr_act);
	thread = getshuttle_thread(thr_act);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
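/*
 * load_threadstate() walks the flavor list that follows a thread
 * command.  Each entry is laid out as
 *
 *	unsigned long	flavor;
 *	unsigned long	count;		(in longwords)
 *	unsigned long	state[count];
 *
 * which is why each iteration consumes (size+2)*sizeof(unsigned long)
 * bytes of the command.
 */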
static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if (total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
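/*
 * load_threadstack() is the same walk, but asks the machine-dependent
 * layer (thread_userstack) for the stack address encoded in the state,
 * noting whether the binary asked for a custom stack.
 */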
static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if (total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_userstack(thread, flavor, ts, size, user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
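/*
 * load_threadentry() repeats the walk once more to extract the initial
 * program counter (via thread_entrypoint) from the register state.
 */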
static
load_return_t
load_threadentry(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if (total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
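/*
 * load_dylinker() loads the dynamic linker named by LC_LOAD_DYLINKER:
 * the image is first parsed into a scratch map, then slid to a free
 * range of the target map (preferring its linked address) and copied
 * over, with the entry point adjusted by the slide.
 */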
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	vm_offset_t		dyl_start, map_addr;
	vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 *	Load the Mach-O.
	 *	Use a temporary map to do the work.
	 */
	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max( map), TRUE);

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				depth, &myresult);
	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);
}
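/*
 * get_macho_vnode() translates a pathname into a vnode opened and
 * validated for execution, peels apart fat binaries to find this
 * machine's architecture, and returns the Mach-O header along with the
 * offset and size of the image within the file.
 */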
static
load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr		attr, *atp;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = KERN_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;
	off_t fsize = (off_t)0;
	struct ucred *cred = p->p_ucred;

	ndp = &nid;
	atp = &attr;

	/* init the namei data to point at the user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

	if (error = namei(ndp))
		return(error);

	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if (error = VOP_GETATTR(vp, &attr, cred, p))
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);

	/* check access; for root we have to see if any exec bit is on */
	if (error = VOP_ACCESS(vp, VEXEC, cred, p))
		goto bad1;
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* hold the vnode for the IO */
	if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
		error = ENOENT;
		goto bad1;
	}

	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, cred, p)) {
		ubc_rele(vp);
		goto bad1;
	}

	if(error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))
		goto bad2;

	if (header.mach_header.magic == MH_MAGIC)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
	    header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, &header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_FAILURE;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		*file_offset = 0;
		*macho_size = fsize = attr.va_size;
	}

	*mach_header = header.mach_header;
	*vpp = vp;
	if (UBCISVALID(vp))
		ubc_setsize(vp, fsize);	/* XXX why? */

	VOP_UNLOCK(vp, 0, p);
	ubc_rele(vp);
	return (error);

bad2:
	VOP_UNLOCK(vp, 0, p);
	error = VOP_CLOSE(vp, FREAD, cred, p);
	ubc_rele(vp);
	vrele(vp);
	return (error);

bad1:
	vput(vp);
	return(error);
}