/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>
#include <kern/task.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/vm_statistics.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <machine/vmparam.h>
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
);
static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
);
static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
);
static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
);
static load_return_t
load_threadstate(
	thread_act_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
);
static load_return_t
load_threadstack(
	thread_act_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*user_stack,
	int			*customstack
);
static load_return_t
load_threadentry(
	thread_act_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*entry_point
);
static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
);
static load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
);
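
/*
 * Rough call graph, for orientation: load_machfile() builds a map and
 * calls parse_machfile(), which walks the load commands and dispatches
 * to load_segment(), load_thread()/load_unixthread() (which in turn use
 * load_threadstate(), load_threadstack() and load_threadentry()), and
 * load_dylinker(); the latter resolves its pathname via get_macho_vnode()
 * and recurses into parse_machfile() one level deeper.
 */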
load_return_t
load_machfile(
	struct vnode		*vp,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	load_result_t		*result,
	thread_act_t		thr_act,
	vm_map_t		new_map,
	boolean_t		clean_regions
)
{
	pmap_t			pmap;
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;
	extern pmap_t pmap_create(vm_size_t size);	/* XXX */
	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef i386
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else
		pmap = pmap_create((vm_size_t) 0);
#endif
		map = vm_map_create(pmap,
				get_map_min(old_map),
				get_map_max(old_map),
				TRUE);	/**** FIXME ****/
	} else
		map = new_map;

	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };
	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			      0, result, clean_regions);

	if (lret != LOAD_SUCCESS) {
		if (create_map)
			vm_map_deallocate(map);	/* will lose pmap reference too */
		return(lret);
	}
	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
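
/*
 * parse_machfile() makes two passes over the load commands: segments are
 * mapped on the first pass and threads are set up on the second, so that
 * register state never refers to memory that has not been mapped yet.
 * The depth argument breaks recursion: an executable is parsed at depth 1
 * and its dynamic linker at depth 2, and the filetype checks below
 * enforce that nesting.
 */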
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
)
{
	struct machine_slot	*ms;
	uint_t			ncmds;
	struct load_command	*lcp, *next;
	struct dylinker_command	*dlp = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	vm_offset_t		addr, kl_addr;
	vm_size_t		size, kl_size;
	int			offset;
	int			pass;
	struct proc		*p = current_proc();	/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;
	/*
	 *	Break infinite recursion.
	 */
	if (depth > 6)
		return(LOAD_FAILURE);

	task = (task_t)get_threadtask(thr_act);

	depth++;

	/*
	 *	Check to see if this is the right machine type.
	 */
	ms = &machine_slot[cpu_number()];
	if ((header->cputype != ms->cpu_type) ||
	    !check_cpu_subtype(header->cpusubtype))
		return(LOAD_BADARCH);
	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}
	/*
	 *	Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);
	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page_32(sizeof (struct mach_header) + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 *	Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);
	if (error = vn_rdwr(UIO_READ, vp, (caddr_t)addr, size, file_offset,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* ubc_map(vp); */ /* NOT HERE */
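
	/*
	 * Everything below walks the kernel copy of the command area read
	 * in above, never user memory; the running offset is checked
	 * against sizeofcmds as each command is consumed.
	 */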
	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		offset = sizeof(struct mach_header);
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			offset += lcp->cmdsize;
			/*
			 *	Check for valid lcp pointer by checking
			 *	next offset.
			 */
			if (offset > header->sizeofcmds
					+ sizeof(struct mach_header)) {
				if (kl_addr)
					kfree(kl_addr, kl_size);
				return(LOAD_BADMACHO);
			}
			/*
			 *	Check for valid command.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager,
					file_offset,
					macho_size,
					(unsigned long)ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp, thr_act,
						  result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
						(struct thread_command *) lcp, thr_act,
						result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0))
					dlp = (struct dylinker_command *)lcp;
				else
					ret = LOAD_FAILURE;
				break;
			default:
				ret = LOAD_SUCCESS;	/* ignore other stuff */
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
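
	/*
	 * On a successful top-level load, hook the task up to the system
	 * shared region (the split-library text/data mappings).  A region
	 * marked full or stale is first swapped for a fresh default region
	 * before its text and data are mapped read-only into the new map.
	 */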
	if ((ret == LOAD_SUCCESS) && (depth == 1)) {
		vm_offset_t addr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings map_info;
		shared_region_mapping_t next;

		vm_get_shared_region(task, &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.fs_base),
			&(map_info.system),
			&(map_info.flags), &next);
		if ((map_info.flags & SHARED_REGION_FULL) ||
			(map_info.flags & SHARED_REGION_STALE)) {
			shared_region_mapping_t system_region;
			system_region = lookup_default_shared_region(
				map_info.fs_base, map_info.system);
			if ((map_info.self != (vm_offset_t)system_region) &&
				(map_info.flags & SHARED_REGION_SYSTEM)) {
				if (system_region == NULL) {
					shared_file_boot_time_init(
						map_info.fs_base, map_info.system);
				} else {
					vm_set_shared_region(task, system_region);
				}
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			} else if (map_info.flags & SHARED_REGION_SYSTEM) {
				shared_region_mapping_dealloc(system_region);
				shared_file_boot_time_init(
					map_info.fs_base, map_info.system);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			} else {
				shared_region_mapping_dealloc(system_region);
			}
		}
		p->p_flag |= P_NOSHLIB;	/* no shlibs in use */
		addr = map_info.client_base;
		if (clean_regions) {
			vm_map(map, &addr, map_info.text_size,
				0, SHARED_LIB_ALIAS,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		} else {
			vm_map(map, &addr, map_info.text_size, 0,
				(VM_MEMORY_SHARED_PMAP << 24)
					| SHARED_LIB_ALIAS,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
		addr = map_info.client_base + map_info.text_size;
		vm_map(map, &addr, map_info.data_size,
			0, SHARED_LIB_ALIAS,
			map_info.data_region, 0, TRUE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		while (next) {
			/* This should be fleshed out for the general case,  */
			/* but it is not necessary for now.  Indeed, we      */
			/* are handling the com page inside of the           */
			/* shared_region mapping create calls for now, for   */
			/* simplicity's sake.  If more general support is    */
			/* needed, the code to manipulate the shared range   */
			/* chain can be pulled out and moved to the callers. */
			shared_region_mapping_info(next,
				&(map_info.text_region),
				&(map_info.text_size),
				&(map_info.data_region),
				&(map_info.data_size),
				&(map_info.region_mappings),
				&(map_info.client_base),
				&(map_info.alternate_base),
				&(map_info.alternate_next),
				&(map_info.fs_base),
				&(map_info.system),
				&(map_info.flags), &next);

			addr = map_info.client_base;
			vm_map(map, &addr, map_info.text_size,
				0, SHARED_LIB_ALIAS,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
		if (dlp != 0) {
			ret = load_dylinker(dlp, map, thr_act,
					depth, result, clean_regions);
		}
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	if ((ret == LOAD_SUCCESS) && (depth == 1) &&
			(result->thread_count == 0))
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS)
		ubc_map(vp);

	return(ret);
}
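
/*
 * load_segment() maps one LC_SEGMENT command in three steps: map the
 * file-backed portion through the vnode pager, zero any slop between the
 * end of the file data and the end of the last mapped page (a step
 * currently under #if FIXME), and allocate anonymous zero-fill for the
 * part of vmsize that filesize does not cover.  Protections are applied
 * last.
 */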
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 *	Make sure what we get from the file is really ours (as specified
	 *	by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page_32(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);
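	/* (KERN_SUCCESS and LOAD_SUCCESS are both zero, so the mixed
	 *  return type here is harmless, if inconsistent.) */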
	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page_32(scp->filesize);
	map_addr = trunc_page_32(scp->vmaddr);

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
				&map_addr, map_size, (vm_offset_t)0, FALSE,
				pager, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
						delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}
	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_allocate(map, &tmp, delta_size, FALSE);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}
	/*
	 *	Set protection values. (Note: ignore errors!)
	 */
	if (scp->maxprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
					map_addr, seg_size,
					TRUE, scp->maxprot);
	}
	if (scp->initprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
					map_addr, seg_size,
					FALSE, scp->initprot);
	}
	if ((scp->fileoff == 0) && (scp->filesize != 0))
		result->mach_header = map_addr;
	return(LOAD_SUCCESS);
}
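
/*
 * LC_UNIXTHREAD and LC_THREAD payloads are a sequence of register-state
 * records, each laid out as
 *
 *	unsigned long	flavor;
 *	unsigned long	count;		number of longs that follow
 *	unsigned long	state[count];
 *
 * which is why the walkers below advance by (size+2)*sizeof(unsigned long)
 * per record.  The load_thread*() helpers hand each record to the
 * machine-dependent thread_setstatus()/thread_userstack()/
 * thread_entrypoint() routines.
 */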
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thread,
	load_result_t		*result
)
{
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->user_stack,
			&customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;
	ret = load_threadentry(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	int		customstack = 0;

	task = get_threadtask(thread);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		act_deallocate(thread);
	}
	lret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);
		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume the thread now; note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadstate(
	thread_act_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_setstatus(thread, flavor, ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
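
/*
 * load_threadstack() seeds *user_stack with the machine default USRSTACK
 * before each record; thread_userstack() overrides it (and sets
 * *customstack) when the register state specifies a custom stack pointer.
 */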
static
load_return_t
load_threadstack(
	thread_act_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)
			return(LOAD_BADMACHO);
		*user_stack = USRSTACK;
		ret = thread_userstack(thread, flavor, ts, size,
				user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
	thread_act_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
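
/*
 * load_dylinker() stages the dynamic linker through a throwaway map:
 * parse_machfile() loads it at its preferred address in copy_map, the
 * range is then allocated in the target map (first at that address, else
 * anywhere), the contents are copied over, and the entry point is slid
 * by the same offset.
 */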
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	vm_offset_t		dyl_start, map_addr;
	vm_size_t		dyl_length;
	extern pmap_t pmap_create(vm_size_t size);	/* XXX */

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };
	/*
	 *	Load the Mach-O.
	 *	Use a temporary map to do the work.
	 */
	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max(map), TRUE);

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				depth, &myresult, clean_regions);
	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			ret = LOAD_NOSPACE;
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}
!= dyl_start
)
921 myresult
.entry_point
+= (map_addr
- dyl_start
);
925 if (ret
== LOAD_SUCCESS
) {
926 result
->dynlinker
= TRUE
;
927 result
->entry_point
= myresult
.entry_point
;
931 vm_map_deallocate(copy_map
);
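
/*
 * get_macho_vnode() turns a pathname into a vnode plus the offset and
 * size of the Mach-O image inside that file.  The header is read into a
 * union and sniffed: MH_MAGIC means a thin file used as-is, while
 * FAT_MAGIC/FAT_CIGAM routes through fatfile_getarch() to select the
 * architecture slice that matches this machine before the real
 * mach_header is read from that slice's offset.
 */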
static
load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr		attr, *atp;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = LOAD_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;
	off_t			fsize = (off_t)0;
	struct ucred		*cred = p->p_ucred;
	int			err2;
	ndp = &nid;
	atp = &attr;

	/* init the namei data to point at the user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

	if (error = namei(ndp)) {
		if (error == ENOENT)
			error = LOAD_ENOENT;
		else
			error = LOAD_FAILURE;
		return(error);
	}

	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}
= VOP_GETATTR(vp
, &attr
, cred
, p
)) {
989 error
= LOAD_FAILURE
;
993 /* Check mount point */
994 if (vp
->v_mount
->mnt_flag
& MNT_NOEXEC
) {
995 error
= LOAD_PROTECT
;
999 if ((vp
->v_mount
->mnt_flag
& MNT_NOSUID
) || (p
->p_flag
& P_TRACED
))
1000 atp
->va_mode
&= ~(VSUID
| VSGID
);
	/* check access.  for root we have to see if any exec bit is on */
	if (error = VOP_ACCESS(vp, VEXEC, cred, p)) {
		error = LOAD_PROTECT;
		goto bad1;
	}
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* hold the vnode for the IO */
	if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
		error = LOAD_ENOENT;
		goto bad1;
	}
	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, cred, p)) {
		error = LOAD_PROTECT;
		ubc_rele(vp);
		goto bad1;
	}

	if (error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p)) {
		error = LOAD_IOERROR;
		goto bad2;
	}
	if (header.mach_header.magic == MH_MAGIC)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;
		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}
		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		*file_offset = 0;
		*macho_size = fsize = attr.va_size;
	}

	*mach_header = header.mach_header;
	*vpp = vp;
	if (UBCISVALID(vp))
		ubc_setsize(vp, fsize);	/* XXX why? */

	VOP_UNLOCK(vp, 0, p);
	ubc_rele(vp);
	return(error);

bad2:
	VOP_UNLOCK(vp, 0, p);
	err2 = VOP_CLOSE(vp, FREAD, cred, p);