/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *  Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *  File:   kern/mach_loader.c
 *  Author: Avadis Tevanian, Jr.
 *
 *  Mach object file loader (kernel version, for now).
 *
 *  21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/vm_statistics.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <machine/vmparam.h>
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
    struct vnode        *vp,
    vm_map_t            map,
    thread_act_t        thr_act,
    struct mach_header  *header,
    unsigned long       file_offset,
    unsigned long       macho_size,
    int                 depth,
    load_result_t       *result,
    boolean_t           clean_regions
);
static load_return_t
load_segment(
    struct segment_command  *scp,
    void                    *pager,
    unsigned long           pager_offset,
    unsigned long           macho_size,
    unsigned long           end_of_file,
    vm_map_t                map,
    load_result_t           *result
);
static load_return_t
load_unixthread(
    struct thread_command   *tcp,
    thread_act_t            thr_act,
    load_result_t           *result
);
static load_return_t
load_thread(
    struct thread_command   *tcp,
    thread_act_t            thr_act,
    load_result_t           *result
);
static load_return_t
load_threadstate(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size
);
static load_return_t
load_threadstack(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size,
    vm_offset_t     *user_stack,
    boolean_t       *customstack
);
static load_return_t
load_threadentry(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size,
    vm_offset_t     *entry_point
);
static load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    vm_map_t                map,
    thread_act_t            thr_act,
    int                     depth,
    load_result_t           *result,
    boolean_t               clean_regions
);
static load_return_t
get_macho_vnode(
    char                *path,
    struct mach_header  *mach_header,
    unsigned long       *file_offset,
    unsigned long       *macho_size,
    struct vnode        **vpp
);
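
/*
 * load_machfile:
 *
 *  Top-level entry point: load the Mach-O image described by header
 *  from vnode vp into a task map.  If new_map is VM_MAP_NULL a fresh
 *  map is created and, on success, swapped in for the current task's
 *  map; otherwise the caller-supplied map is filled in directly.
 */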
load_return_t
load_machfile(
    struct vnode        *vp,
    struct mach_header  *header,
    unsigned long       file_offset,
    unsigned long       macho_size,
    load_result_t       *result,
    thread_act_t        thr_act,
    vm_map_t            new_map,
    boolean_t           clean_regions
)
{
    pmap_t          pmap;
    vm_map_t        map;
    vm_map_t        old_map;
    load_result_t   myresult;
    load_return_t   lret;
    boolean_t       create_map = TRUE;

    if (new_map != VM_MAP_NULL) {
        create_map = FALSE;
    }

    if (create_map) {
        old_map = current_map();
#ifdef i386
        /* i386 reuses the task's existing pmap */
        pmap = get_task_pmap(current_task());
        pmap_reference(pmap);
#else
        pmap = pmap_create((vm_size_t) 0);
#endif
        map = vm_map_create(pmap,
                get_map_min(old_map),
                get_map_max(old_map),
                TRUE);  /**** FIXME ****/
    } else
        map = new_map;

    if (!result)
        result = &myresult;

    *result = (load_result_t) { 0 };

    lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
                          0, result, clean_regions);

    if (lret != LOAD_SUCCESS) {
        if (create_map)
            vm_map_deallocate(map);     /* will lose pmap reference too */
        return(lret);
    }

    /*
     * Commit to new map.  First make sure that the current
     * users of the task get done with it, and that we clean
     * up the old contents of IPC and memory.  The task is
     * guaranteed to be single threaded upon return (us).
     *
     * Swap the new map for the old at the task level and at
     * our activation.  The latter consumes our new map reference
     * but each leaves us responsible for the old_map reference.
     * That lets us get off the pmap associated with it, and
     * then we can release it.
     */
    if (create_map) {
        task_halt(current_task());

        old_map = swap_task_map(current_task(), map);
        vm_map_deallocate(old_map);

        old_map = swap_act_map(current_act(), map);

#ifndef i386
        pmap_switch(pmap);      /* Make sure we are using the new pmap */
#endif
        vm_map_deallocate(old_map);
    }
    return(LOAD_SUCCESS);
}
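
/*
 * parse_machfile:
 *
 *  Walk the image's load commands in two passes (segments first,
 *  then threads), dispatching each command to the appropriate
 *  load_* handler.  At depth 1 this also wires up the system shared
 *  region and, if an LC_LOAD_DYLINKER command was seen, recursively
 *  loads the dynamic linker.
 */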
static load_return_t
parse_machfile(
    struct vnode        *vp,
    vm_map_t            map,
    thread_act_t        thr_act,
    struct mach_header  *header,
    unsigned long       file_offset,
    unsigned long       macho_size,
    int                 depth,
    load_result_t       *result,
    boolean_t           clean_regions
)
{
    struct machine_slot     *ms;
    unsigned long           ncmds;
    struct load_command     *lcp, *next;
    struct dylinker_command *dlp = 0;
    void                    *pager;
    load_return_t           ret = LOAD_SUCCESS;
    vm_offset_t             addr, kl_addr;
    vm_size_t               size, kl_size;
    int                     offset;
    int                     pass;
    struct proc *p = current_proc();    /* XXXX */
    int                     error;
    int                     resid = 0;
    task_t                  task;

    /*
     *  Break infinite recursion
     */
    if (depth > 6)
        return(LOAD_FAILURE);

    task = (task_t)get_threadtask(thr_act);

    depth++;

    /*
     *  Check to see if this is the right machine type.
     */
    ms = &machine_slot[cpu_number()];
    if ((header->cputype != ms->cpu_type) ||
        !check_cpu_subtype(header->cpusubtype))
        return(LOAD_BADARCH);

    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1)
            return (LOAD_FAILURE);
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1)
            return (LOAD_FAILURE);
        break;

    case MH_DYLINKER:
        if (depth != 2)
            return (LOAD_FAILURE);
        break;

    default:
        return (LOAD_FAILURE);
    }
    /*
     *  Get the pager for the file.
     */
    UBCINFOCHECK("parse_machfile", vp);
    pager = (void *) ubc_getpager(vp);

    /*
     *  Map portion that must be accessible directly into
     *  kernel's map.
     */
    if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     *  Round size of Mach-O commands up to page boundary.
     */
    size = round_page_32(sizeof (struct mach_header) + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     *  Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    if(error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
            UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
        if (kl_addr)
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);
    }
    /* ubc_map(vp); */ /* NOT HERE */
    /*
     *  Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 2; pass++) {
        offset = sizeof(struct mach_header);
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             *  Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            offset += lcp->cmdsize;

            /*
             *  Check for valid lcp pointer by checking
             *  next offset.
             */
            if (offset > header->sizeofcmds
                    + sizeof(struct mach_header)) {
                if (kl_addr)
                    kfree(kl_addr, kl_size);
                return(LOAD_BADMACHO);
            }

            /*
             *  Check for valid command.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                        (struct segment_command *) lcp,
                        pager,
                        file_offset,
                        macho_size,
                        (unsigned long)ubc_getsize(vp),
                        map,
                        result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp, thr_act,
                        result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                        (struct thread_command *) lcp, thr_act,
                        result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if ((depth == 1) && (dlp == 0))
                    dlp = (struct dylinker_command *)lcp;
                else
                    ret = LOAD_FAILURE;
                break;
            default:
                ret = LOAD_SUCCESS;     /* ignore other stuff */
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }
    if ((ret == LOAD_SUCCESS) && (depth == 1)) {
        vm_offset_t addr;
        shared_region_mapping_t shared_region;
        struct shared_region_task_mappings map_info;
        shared_region_mapping_t next;

        vm_get_shared_region(task, &shared_region);
        map_info.self = (vm_offset_t)shared_region;
        shared_region_mapping_info(shared_region,
            &(map_info.text_region),
            &(map_info.text_size),
            &(map_info.data_region),
            &(map_info.data_size),
            &(map_info.region_mappings),
            &(map_info.client_base),
            &(map_info.alternate_base),
            &(map_info.alternate_next),
            &(map_info.fs_base),
            &(map_info.system),
            &(map_info.flags), &next);

        if((map_info.flags & SHARED_REGION_FULL) ||
            (map_info.flags & SHARED_REGION_STALE)) {
            shared_region_mapping_t system_region;
            system_region = lookup_default_shared_region(
                map_info.fs_base, map_info.system);
            if((map_info.self != (vm_offset_t)system_region) &&
                (map_info.flags & SHARED_REGION_SYSTEM)) {
                if(system_region == NULL) {
                    shared_file_boot_time_init(
                        map_info.fs_base, map_info.system);
                } else {
                    vm_set_shared_region(task, system_region);
                }
                shared_region_mapping_dealloc(
                    (shared_region_mapping_t)map_info.self);
            } else if (map_info.flags & SHARED_REGION_SYSTEM) {
                shared_region_mapping_dealloc(system_region);
                shared_file_boot_time_init(
                    map_info.fs_base, map_info.system);
                shared_region_mapping_dealloc(
                    (shared_region_mapping_t)map_info.self);
            } else {
                shared_region_mapping_dealloc(system_region);
            }
        }

        p->p_flag |= P_NOSHLIB; /* no shlibs in use */
        addr = map_info.client_base;
        if (clean_regions) {
            vm_map(map, &addr, map_info.text_size,
                0, SHARED_LIB_ALIAS,
                map_info.text_region, 0, FALSE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
        } else {
            vm_map(map, &addr, map_info.text_size, 0,
                (VM_MEMORY_SHARED_PMAP << 24)
                    | SHARED_LIB_ALIAS,
                map_info.text_region, 0, FALSE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
        }
        addr = map_info.client_base + map_info.text_size;
        vm_map(map, &addr, map_info.data_size,
            0, SHARED_LIB_ALIAS,
            map_info.data_region, 0, TRUE,
            VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

        while (next) {
            /* this should be fleshed out for the general case */
            /* but this is not necessary for now.  Indeed we    */
            /* are handling the com page inside of the          */
            /* shared_region mapping create calls for now for   */
            /* simplicity's sake.  If more general support is   */
            /* needed the code to manipulate the shared range   */
            /* chain can be pulled out and moved to the callers */
            shared_region_mapping_info(next,
                &(map_info.text_region),
                &(map_info.text_size),
                &(map_info.data_region),
                &(map_info.data_size),
                &(map_info.region_mappings),
                &(map_info.client_base),
                &(map_info.alternate_base),
                &(map_info.alternate_next),
                &(map_info.fs_base),
                &(map_info.system),
                &(map_info.flags), &next);

            addr = map_info.client_base;
            vm_map(map, &addr, map_info.text_size,
                0, SHARED_LIB_ALIAS,
                map_info.text_region, 0, FALSE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
        }
    }

    if (dlp != 0)
        ret = load_dylinker(dlp, map, thr_act,
                depth, result, clean_regions);
    if (kl_addr)
        kfree(kl_addr, kl_size);

    if ((ret == LOAD_SUCCESS) && (depth == 1) &&
            (result->thread_count == 0))
        ret = LOAD_FAILURE;
    if (ret == LOAD_SUCCESS)
        ubc_map(vp);

    return(ret);
}
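
/*
 * load_segment:
 *
 *  Map one LC_SEGMENT command into the target map: map the file
 *  contents through the vnode pager, zero-fill any remainder of the
 *  segment beyond the file contents, and apply the segment's initial
 *  and maximum protections.
 */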
static load_return_t
load_segment(
    struct segment_command  *scp,
    void                    *pager,
    unsigned long           pager_offset,
    unsigned long           macho_size,
    unsigned long           end_of_file,
    vm_map_t                map,
    load_result_t           *result
)
{
    kern_return_t   ret;
    vm_offset_t     map_addr, map_offset;
    vm_size_t       map_size, seg_size, delta_size;
    vm_prot_t       initprot;
    vm_prot_t       maxprot;
    extern int      print_map_addr;

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp->fileoff + scp->filesize > macho_size)
        return (LOAD_BADMACHO);

    seg_size = round_page_32(scp->vmsize);
    if (seg_size == 0)
        return(KERN_SUCCESS);

    /*
     *  Round sizes to page size.
     */
    map_size = round_page_32(scp->filesize);
    map_addr = trunc_page_32(scp->vmaddr);

    map_offset = pager_offset + scp->fileoff;

    if (map_size > 0) {
        initprot = (scp->initprot) & VM_PROT_ALL;
        maxprot = (scp->maxprot) & VM_PROT_ALL;
        /*
         *  Map a copy of the file into the address space.
         */
        ret = vm_map(map,
                &map_addr, map_size, (vm_offset_t)0, FALSE,
                pager, map_offset, TRUE,
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);

#if 0
        printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
#endif
        /*
         *  If the file didn't end on a page boundary,
         *  we need to zero the leftover.
         */
        delta_size = map_size - scp->filesize;
        if (delta_size > 0) {
            vm_offset_t tmp;

            ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
            if (ret != KERN_SUCCESS)
                return(LOAD_RESOURCE);

            if (copyout(tmp, map_addr + scp->filesize,
                        delta_size)) {
                (void) vm_deallocate(
                        kernel_map, tmp, delta_size);
                return(LOAD_FAILURE);
            }

            (void) vm_deallocate(kernel_map, tmp, delta_size);
        }
    }

    /*
     *  If the virtual size of the segment is greater
     *  than the size from the file, we need to allocate
     *  zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        vm_offset_t tmp = map_addr + map_size;

        ret = vm_allocate(map, &tmp, delta_size, FALSE);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);
    }

    /*
     *  Set protection values. (Note: ignore errors!)
     */
    if (scp->maxprot != VM_PROT_DEFAULT) {
        (void) vm_protect(map,
                map_addr, seg_size,
                TRUE, scp->maxprot);
    }
    if (scp->initprot != VM_PROT_DEFAULT) {
        (void) vm_protect(map,
                map_addr, seg_size,
                FALSE, scp->initprot);
    }
    if ( (scp->fileoff == 0) && (scp->filesize != 0) )
        result->mach_header = map_addr;
    return(LOAD_SUCCESS);
}
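
/*
 * load_unixthread:
 *
 *  Handle an LC_UNIXTHREAD command: set up the stack, entry point
 *  and register state for the initial thread of a Unix process.
 *  Only one such command is allowed per image.
 */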
static load_return_t
load_unixthread(
    struct thread_command   *tcp,
    thread_act_t            thr_act,
    load_result_t           *result
)
{
    thread_t        thread = current_thread();
    load_return_t   ret;
    int             customstack = 0;

    if (result->thread_count != 0)
        return (LOAD_FAILURE);

    thread = getshuttle_thread(thr_act);
    ret = load_threadstack(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->user_stack,
            &customstack);
    if (ret != LOAD_SUCCESS)
        return(ret);

    if (customstack)
        result->customstack = 1;
    else
        result->customstack = 0;
    ret = load_threadentry(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->entry_point);
    if (ret != LOAD_SUCCESS)
        return(ret);

    ret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (ret != LOAD_SUCCESS)
        return (ret);

    result->unixproc = TRUE;
    result->thread_count++;

    return(LOAD_SUCCESS);
}
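
/*
 * load_thread:
 *
 *  Handle an LC_THREAD command: create a new thread (for any thread
 *  after the first), set its register state and, for the first
 *  thread only, its stack and entry point; later threads are resumed
 *  immediately.
 */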
static load_return_t
load_thread(
    struct thread_command   *tcp,
    thread_act_t            thr_act,
    load_result_t           *result
)
{
    kern_return_t   kret;
    load_return_t   lret;
    task_t          task;
    thread_t        thread;
    int             customstack = 0;

    task = get_threadtask(thr_act);
    thread = getshuttle_thread(thr_act);

    /* if count is 0; same as thr_act */
    if (result->thread_count != 0) {
        kret = thread_create(task, &thread);
        if (kret != KERN_SUCCESS)
            return(LOAD_RESOURCE);
        thread_deallocate(thread);
    }

    lret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (lret != LOAD_SUCCESS)
        return (lret);

    if (result->thread_count == 0) {
        lret = load_threadstack(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->user_stack,
                &customstack);
        if (customstack)
            result->customstack = 1;
        else
            result->customstack = 0;

        if (lret != LOAD_SUCCESS)
            return(lret);

        lret = load_threadentry(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->entry_point);
        if (lret != LOAD_SUCCESS)
            return(lret);
    }
    /*
     *  Resume thread now, note that this means that the thread
     *  commands should appear after all the load commands to
     *  be sure they don't reference anything not yet mapped.
     */
    else
        thread_resume(thread);

    result->thread_count++;

    return(LOAD_SUCCESS);
}
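
/*
 * load_threadstate:
 *
 *  Apply a sequence of (flavor, count, state...) records from the
 *  thread command to the thread's register state.
 */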
static load_return_t
load_threadstate(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size
)
{
    kern_return_t   ret;
    unsigned long   size;
    int             flavor;

    /*
     *  Set the thread state.
     */
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        total_size -= (size+2)*sizeof(unsigned long);
        if (total_size < 0)
            return(LOAD_BADMACHO);
        ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size;     /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}
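
/*
 * load_threadstack:
 *
 *  Derive the user stack address (and whether it is a custom stack)
 *  from the thread command's state records.
 */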
static load_return_t
load_threadstack(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size,
    vm_offset_t     *user_stack,
    boolean_t       *customstack
)
{
    kern_return_t   ret;
    unsigned long   size;
    int             flavor;

    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        total_size -= (size+2)*sizeof(unsigned long);
        if (total_size < 0)
            return(LOAD_BADMACHO);
        *user_stack = USRSTACK;
        ret = thread_userstack(thread, flavor, ts, size,
                user_stack, customstack);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size;     /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}
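
/*
 * load_threadentry:
 *
 *  Derive the entry point address from the thread command's state
 *  records.
 */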
static load_return_t
load_threadentry(
    thread_t        thread,
    unsigned long   *ts,
    unsigned long   total_size,
    vm_offset_t     *entry_point
)
{
    kern_return_t   ret;
    unsigned long   size;
    int             flavor;

    /*
     *  Set the thread state.
     */
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        total_size -= (size+2)*sizeof(unsigned long);
        if (total_size < 0)
            return(LOAD_BADMACHO);
        ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size;     /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}
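
/*
 * load_dylinker:
 *
 *  Load the dynamic linker named by an LC_LOAD_DYLINKER command.
 *  The dylinker is parsed into a throwaway map first, then copied
 *  into the target map at its preferred address if available, or at
 *  a slid address otherwise (adjusting the entry point to match).
 */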
static load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    vm_map_t                map,
    thread_act_t            thr_act,
    int                     depth,
    load_result_t           *result,
    boolean_t               clean_regions
)
{
    char                *name;
    char                *p;
    struct vnode        *vp;
    struct mach_header  header;
    unsigned long       file_offset;
    unsigned long       macho_size;
    vm_map_t            copy_map;
    load_result_t       myresult;
    kern_return_t       ret;
    vm_map_copy_t       tmp;
    vm_offset_t         dyl_start, map_addr;
    vm_size_t           dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *  Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = (load_result_t) { 0 };

    /*
     *  Load the Mach-O.
     *  Use a temporary map to do the work.
     */
    copy_map = vm_map_create(pmap_create(macho_size),
            get_map_min(map), get_map_max(map), TRUE);

    ret = parse_machfile(vp, copy_map, thr_act, &header,
            file_offset, macho_size,
            depth, &myresult, clean_regions);

    if (ret)
        goto out;

    if (get_map_nentries(copy_map) > 0) {

        dyl_start = get_map_start(copy_map);
        dyl_length = get_map_end(copy_map) - dyl_start;

        /* try the dylinker's preferred address first, then
         * let the kernel choose an address for it */
        map_addr = dyl_start;
        ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
        if (ret != KERN_SUCCESS) {
            ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
        }

        if (ret != KERN_SUCCESS) {
            ret = LOAD_NOSPACE;
            goto out;
        }
        ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
                &tmp);
        if (ret != KERN_SUCCESS) {
            (void) vm_map_remove(map,
                    map_addr,
                    map_addr + dyl_length,
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
        if (ret != KERN_SUCCESS) {
            vm_map_copy_discard(tmp);
            (void) vm_map_remove(map,
                    map_addr,
                    map_addr + dyl_length,
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        if (map_addr != dyl_start)
            myresult.entry_point += (map_addr - dyl_start);
    } else
        ret = LOAD_FAILURE;

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        ubc_map(vp);
    }
out:
    vm_map_deallocate(copy_map);

    vrele(vp);
    return (ret);
}
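
/*
 * get_macho_vnode:
 *
 *  Look up a path, verify that it is an executable regular file, and
 *  return its vnode along with the offset and size of the Mach-O
 *  image it contains (handling fat binaries by selecting the
 *  architecture that matches this machine).
 */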
static load_return_t
get_macho_vnode(
    char                *path,
    struct mach_header  *mach_header,
    unsigned long       *file_offset,
    unsigned long       *macho_size,
    struct vnode        **vpp
)
{
    struct vnode        *vp;
    struct vattr        attr, *atp;
    struct nameidata    nid, *ndp;
    struct proc         *p = current_proc();   /* XXXX */
    boolean_t           is_fat;
    struct fat_arch     fat_arch;
    int                 error = KERN_SUCCESS;
    int                 resid;
    union {
        struct mach_header  mach_header;
        struct fat_header   fat_header;
        char                pad[512];
    } header;
    off_t fsize = (off_t)0;
    struct ucred *cred = p->p_ucred;

    ndp = &nid;
    atp = &attr;

    /* init the namei data to point at the user's program name */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

    if (error = namei(ndp))
        return(error);

    vp = ndp->ni_vp;

    /* check for regular file */
    if (vp->v_type != VREG) {
        error = EACCES;
        goto bad1;
    }

    /* get attributes */
    if (error = VOP_GETATTR(vp, &attr, cred, p))
        goto bad1;

    /* Check mount point */
    if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
        error = EACCES;
        goto bad1;
    }

    if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
        atp->va_mode &= ~(VSUID | VSGID);

    /* check access.  for root we have to see if any exec bit on */
    if (error = VOP_ACCESS(vp, VEXEC, cred, p))
        goto bad1;
    if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
        error = EACCES;
        goto bad1;
    }

    /* hold the vnode for the IO */
    if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
        error = ENOENT;
        goto bad1;
    }

    /* try to open it */
    if (error = VOP_OPEN(vp, FREAD, cred, p)) {
        ubc_rele(vp);
        goto bad1;
    }

    if(error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
            UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))
        goto bad2;

    if (header.mach_header.magic == MH_MAGIC)
        is_fat = FALSE;
    else if (header.fat_header.magic == FAT_MAGIC ||
             header.fat_header.magic == FAT_CIGAM)
        is_fat = TRUE;
    else {
        error = LOAD_BADMACHO;
        goto bad2;
    }

    if (is_fat) {
        /* Look up our architecture in the fat file. */
        error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
        if (error != LOAD_SUCCESS)
            goto bad2;

        /* Read the Mach-O header out of it */
        error = vn_rdwr(UIO_READ, vp, &header.mach_header,
                sizeof(header.mach_header), fat_arch.offset,
                UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
        if (error) {
            error = LOAD_FAILURE;
            goto bad2;
        }

        /* Is this really a Mach-O? */
        if (header.mach_header.magic != MH_MAGIC) {
            error = LOAD_BADMACHO;
            goto bad2;
        }

        *file_offset = fat_arch.offset;
        *macho_size = fsize = fat_arch.size;
    } else {
        *file_offset = 0;
        *macho_size = fsize = attr.va_size;
    }

    *mach_header = header.mach_header;
    *vpp = vp;
    if (UBCISVALID(vp))
        ubc_setsize(vp, fsize); /* XXX why? */

    VOP_UNLOCK(vp, 0, p);
    ubc_rele(vp);
    return (error);

bad2:
    VOP_UNLOCK(vp, 0, p);
    error = VOP_CLOSE(vp, FREAD, cred, p);
    ubc_rele(vp);
    vrele(vp);
    return (error);

bad1:
    vput(vp);
    return(error);
}