/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>
#include <kern/task.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/vm_statistics.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <machine/vmparam.h>
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
);

static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
);

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
);

static load_return_t
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*user_stack,
	int			*customstack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*entry_point
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
);

static load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
);
load_return_t
load_machfile(
	struct vnode		*vp,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	load_result_t		*result,
	thread_act_t		thr_act,
	vm_map_t		new_map,
	boolean_t		clean_regions
)
{
	pmap_t			pmap;
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;
	extern pmap_t		pmap_create(vm_size_t size);	/* XXX */
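
	/*
	 * If the caller passed in a map (new_map) the file is loaded
	 * straight into it; otherwise a fresh map (and, except on i386,
	 * a fresh pmap) is built here and swapped into the task below.
	 */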
	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef i386
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else
		pmap = pmap_create((vm_size_t) 0);
#endif
		map = vm_map_create(pmap,
				get_map_min(old_map),
				get_map_max(old_map),
				TRUE);	/**** FIXME ****/
	} else
		map = new_map;
	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			      0, result, clean_regions);
	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}
	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
#ifndef i386
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}

int	dylink_test = 1;
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
)
{
	struct machine_slot	*ms;
	int			ncmds;
	struct load_command	*lcp, *next;
	struct dylinker_command	*dlp = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	vm_offset_t		addr, kl_addr;
	vm_size_t		size, kl_size;
	int			offset;
	int			pass;
	struct proc		*p = current_proc();	/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;
	/*
	 *	Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);

	task = (task_t)get_threadtask(thr_act);

	depth++;
	/*
	 *	Check to see if right machine type.
	 */
	ms = &machine_slot[cpu_number()];
	if ((header->cputype != ms->cpu_type) ||
	    !check_cpu_subtype(header->cpusubtype))
		return(LOAD_BADARCH);
	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}
	/*
	 *	Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);
	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);
	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page_32(sizeof (struct mach_header) + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);
	/*
	 *	Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	if (error = vn_rdwr(UIO_READ, vp, (caddr_t)addr, size, file_offset,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* ubc_map(vp); */ /* NOT HERE */
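
	/*
	 * The load commands are walked twice: LC_SEGMENT is handled on
	 * pass 1 so that all segments are mapped before the pass-2
	 * commands (LC_THREAD, LC_UNIXTHREAD, LC_LOAD_DYLINKER) touch
	 * that memory.
	 */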
	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		offset = sizeof(struct mach_header);
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			offset += lcp->cmdsize;

			/*
			 *	Check for valid lcp pointer by checking
			 *	next offset.
			 */
			if (offset > header->sizeofcmds
					+ sizeof(struct mach_header)) {
				if (kl_addr)
					kfree(kl_addr, kl_size);
				return(LOAD_BADMACHO);
			}
			/*
			 *	Check for valid command.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					       (struct segment_command *) lcp,
						   pager,
						   file_offset,
						   macho_size,
						   (unsigned long)ubc_getsize(vp),
						   map,
						   result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp, thr_act,
						  result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp, thr_act,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0))
					dlp = (struct dylinker_command *)lcp;
				else
					ret = LOAD_FAILURE;
				break;
			default:
				ret = LOAD_SUCCESS;/* ignore other stuff */
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if ((ret == LOAD_SUCCESS) && (depth == 1)) {
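		/*
		 * For the top-level binary (depth 1), make sure the task
		 * has a usable system shared region and map its text and
		 * data into this address space before the dynamic linker
		 * runs.
		 */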
		vm_offset_t addr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings map_info;
		shared_region_mapping_t next;
RedoLookup:
		vm_get_shared_region(task, &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.fs_base),
			&(map_info.system),
			&(map_info.flags), &next);
		if((map_info.flags & SHARED_REGION_FULL) ||
			(map_info.flags & SHARED_REGION_STALE)) {
			shared_region_mapping_t system_region;
			system_region = lookup_default_shared_region(
				map_info.fs_base, map_info.system);
			if((map_info.self != (vm_offset_t)system_region) &&
				(map_info.flags & SHARED_REGION_SYSTEM)) {
				if(system_region == NULL) {
					shared_file_boot_time_init(
						map_info.fs_base, map_info.system);
				} else {
					vm_set_shared_region(task, system_region);
				}
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
				goto RedoLookup;
			} else if (map_info.flags & SHARED_REGION_SYSTEM) {
				shared_region_mapping_dealloc(system_region);
				shared_file_boot_time_init(
					map_info.fs_base, map_info.system);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			} else {
				shared_region_mapping_dealloc(system_region);
			}
		}
		if (dylink_test) {
			p->p_flag |= P_NOSHLIB;	/* no shlibs in use */
			addr = map_info.client_base;
			if (clean_regions) {
				vm_map(map, &addr, map_info.text_size,
					0, SHARED_LIB_ALIAS,
					map_info.text_region, 0, FALSE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			} else {
				vm_map(map, &addr, map_info.text_size, 0,
					(VM_MEMORY_SHARED_PMAP << 24)
							| SHARED_LIB_ALIAS,
					map_info.text_region, 0, FALSE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			}
			addr = map_info.client_base + map_info.text_size;
			vm_map(map, &addr, map_info.data_size,
				0, SHARED_LIB_ALIAS,
				map_info.data_region, 0, TRUE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			while (next) {
				/* this should be fleshed out for the general case */
				/* but this is not necessary for now.  Indeed we   */
				/* are handling the com page inside of the         */
				/* shared_region mapping create calls for now for  */
				/* simplicities sake.  If more general support is  */
				/* needed the code to manipulate the shared range  */
				/* chain can be pulled out and moved to the callers*/
				shared_region_mapping_info(next,
					&(map_info.text_region),
					&(map_info.text_size),
					&(map_info.data_region),
					&(map_info.data_size),
					&(map_info.region_mappings),
					&(map_info.client_base),
					&(map_info.alternate_base),
					&(map_info.alternate_next),
					&(map_info.fs_base),
					&(map_info.system),
					&(map_info.flags), &next);
				addr = map_info.client_base;
				vm_map(map, &addr, map_info.text_size,
					0, SHARED_LIB_ALIAS,
					map_info.text_region, 0, FALSE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			}
		}
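
		/*
		 * The executable's segments are all mapped at this point,
		 * so it is safe to bring in the dynamic linker recorded by
		 * LC_LOAD_DYLINKER above.
		 */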
		if (dlp != 0) {
			ret = load_dylinker(dlp, map, thr_act,
					    depth, result, clean_regions);
		}
	}
	if (kl_addr)
		kfree(kl_addr, kl_size);

	if ((ret == LOAD_SUCCESS) && (depth == 1) &&
				(result->thread_count == 0))
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS)
		ubc_map(vp);

	return(ret);
}
static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;
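
	/*
	 * A segment comes in three pieces: the file-backed mapping, a
	 * zeroed tail on the last file page, and zero-fill memory for
	 * any vmsize beyond filesize.
	 */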
	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page_32(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);
	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page_32(scp->filesize);
	map_addr = trunc_page_32(scp->vmaddr);

	map_offset = pager_offset + scp->fileoff;
	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
				&map_addr, map_size, (vm_offset_t)0, FALSE,
				pager, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
							delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}
	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_allocate(map, &tmp, delta_size, FALSE);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}
	/*
	 *	Set protection values. (Note: ignore errors!)
	 */
	if (scp->maxprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  TRUE, scp->maxprot);
	}
	if (scp->initprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  FALSE, scp->initprot);
	}
	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;
	return(LOAD_SUCCESS);
}
static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thread,
	load_result_t		*result
)
{
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);
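
	/*
	 * Only one LC_UNIXTHREAD is allowed; it supplies the initial
	 * stack, entry point and register state of the BSD process.
	 */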
	ret = load_threadstack(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->user_stack,
		       &customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;
	ret = load_threadentry(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	int		customstack = 0;

	task = get_threadtask(thread);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		act_deallocate(thread);
	}
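
	/*
	 * The first thread command reuses the activation we were handed;
	 * each additional one creates a new thread in the task, which is
	 * resumed at the bottom of this function.
	 */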
	lret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);
	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
static load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
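	/*
	 * The payload is a sequence of (flavor, count, count words of
	 * state) records totalling total_size bytes; each record is
	 * handed to the machine-dependent layer.
	 */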
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if (total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_setstatus(thread, flavor, ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if (total_size < 0)
			return(LOAD_BADMACHO);
		*user_stack = USRSTACK;
		ret = thread_userstack(thread, flavor, ts, size,
				user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static load_return_t
load_threadentry(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	*entry_point = 0;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if (total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	vm_offset_t		dyl_start, map_addr;
	vm_size_t		dyl_length;
	extern pmap_t		pmap_create(vm_size_t size);	/* XXX */

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);
	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };
	/*
	 *	Load the Mach-O.
	 *	Use a temporary map to do the work.
	 */
	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max(map), TRUE);

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				depth, &myresult, clean_regions);

	if (ret)
		goto out;
	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;
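
		/*
		 * Try to reserve the linker's preferred address range in
		 * the target map; if that fails, let vm_allocate pick a
		 * spot and slide the entry point by the same delta below.
		 */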
		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);
}
static load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr		attr, *atp;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = LOAD_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;
	off_t			fsize = (off_t)0;
	struct ucred		*cred = p->p_ucred;
	int			err2;

	ndp = &nid;
	atp = &attr;
	/* init the namei data to point at the user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

	if (error = namei(ndp)) {
		if (error == ENOENT)
			error = LOAD_ENOENT;
		else
			error = LOAD_FAILURE;
		return(error);
	}

	vp = ndp->ni_vp;
	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}
	/* get attributes */
	if (error = VOP_GETATTR(vp, &attr, cred, p)) {
		error = LOAD_FAILURE;
		goto bad1;
	}
	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}
	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);
	/* check access.  for root we have to see if any exec bit on */
	if (error = VOP_ACCESS(vp, VEXEC, cred, p)) {
		error = LOAD_PROTECT;
		goto bad1;
	}
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}
	/* hold the vnode for the IO */
	if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
		error = LOAD_ENOENT;
		goto bad1;
	}
	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, cred, p)) {
		error = LOAD_PROTECT;
		ubc_rele(vp);
		goto bad1;
	}
	if (error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p)) {
		error = LOAD_IOERROR;
		goto bad2;
	}
	if (header.mach_header.magic == MH_MAGIC)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}
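
	/*
	 * For a fat binary, pick the slice that matches this machine's
	 * architecture and reread the Mach-O header from that slice's
	 * offset; a thin file is used from offset 0 as-is.
	 */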
	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;
		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}
		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}
		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		*file_offset = 0;
		*macho_size = fsize = attr.va_size;
	}

	*mach_header = header.mach_header;
	*vpp = vp;
	if (UBCISVALID(vp))
		ubc_setsize(vp, fsize);	/* XXX why? */
	VOP_UNLOCK(vp, 0, p);
	ubc_rele(vp);
	return (error);

bad2:
	VOP_UNLOCK(vp, 0, p);
	err2 = VOP_CLOSE(vp, FREAD, cred, p);
	ubc_rele(vp);
	vrele(vp);
	return (error);

bad1:
	vput(vp);
	return(error);
}