/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>

#include <ufs/ufs/lockf.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/shared_memory_server.h>
#include <mach/vm_statistics.h>
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	unsigned long		*lib_version,
	load_result_t		*result
);

static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
);

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	load_result_t		*result
);

static load_return_t
load_thread(
	struct thread_command	*tcp,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
);

static load_return_t
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*user_stack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*entry_point
);

static load_return_t
load_fvmlib(
	struct fvmlib_command	*lcp,
	vm_map_t		map,
	int			depth
);

static load_return_t
load_idfvmlib(
	struct fvmlib_command	*lcp,
	unsigned long		*version
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	int			depth,
	load_result_t		*result
);

static load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
);
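
/*
 * load_machfile() is the top-level entry point: it creates a fresh
 * address map, hands the file to parse_machfile(), and on success
 * swaps the new map into the current task; on failure the new map
 * (and its pmap) is simply thrown away.
 */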
load_return_t
load_machfile(
	struct vnode		*vp,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	load_result_t		*result
)
{
	pmap_t			pmap;
	vm_map_t		map;
	vm_map_t		old_map;
	load_return_t		lret;
	load_result_t		myresult;

	old_map = current_map();
#ifdef i386
	pmap = get_task_pmap(current_task());
	pmap_reference(pmap);
#else
	pmap = pmap_create((vm_size_t) 0);
#endif
	map = vm_map_create(pmap,
			get_map_min(old_map),
			get_map_max(old_map),
			TRUE);			/**** FIXME ****/

	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };

	lret = parse_machfile(vp, map, header, file_offset, macho_size,
			      0, (unsigned long *)0, result);

	if (lret != LOAD_SUCCESS) {
		vm_map_deallocate(map);	/* will lose pmap reference too */
		return(lret);
	}

	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old at the task level and at
	 *	our activation.  The latter consumes our new map reference
	 *	but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	task_halt(current_task());

	old_map = swap_task_map(current_task(), map);
	vm_map_deallocate(old_map);

	old_map = swap_act_map(current_act(), map);

#ifndef i386
	pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif
	vm_map_deallocate(old_map);

	return(LOAD_SUCCESS);
}
extern vm_offset_t	system_shared_region;
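
/*
 * parse_machfile() walks the load commands of a Mach-O image.  It is
 * called recursively (bounded by "depth") to load fixed shared
 * libraries and the dynamic link editor, so the file types accepted
 * depend on the recursion depth.
 */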
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	unsigned long		*lib_version,
	load_result_t		*result
)
{
	struct machine_slot	*ms;
	unsigned long		ncmds;
	struct load_command	*lcp, *next;
	struct dylinker_command	*dlp = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	vm_offset_t		addr, kl_addr;
	vm_size_t		size, kl_size;
	int			offset;
	int			pass;
	struct proc		*p = current_proc();		/* XXXX */
	int			error;
	int			resid = 0;
	/*
	 *	Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	ms = &machine_slot[cpu_number()];
	if ((header->cputype != ms->cpu_type) ||
	    !check_cpu_subtype(header->cpusubtype))
		return(LOAD_BADARCH);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}
	/*
	 *	Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(sizeof (struct mach_header) + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);
	/*
	 *	Map the load commands into kernel memory.
	 */
	addr = 0;
#if 0
	ret = vm_allocate_with_pager(kernel_map, &addr, size, TRUE, pager,
			file_offset);

	ret = vm_map(kernel_map, &addr, size, 0, TRUE, pager, file_offset, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (ret != KERN_SUCCESS) {
		return(LOAD_NOSPACE);
	}
#endif
	kl_size = size;
	kl_addr = kalloc(size);
	addr = kl_addr;
	if (addr == 0) {
		printf("No space to readin load commands\n");
		return(LOAD_NOSPACE);
	}

	if (error = vn_rdwr(UIO_READ, vp, (caddr_t)addr, size, file_offset,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
		printf("Load command read over nfs failed\n");
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* ubc_map(vp); */		/* NOT HERE */
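
	/*
	 * The commands are walked twice: segment and library commands
	 * take effect on pass 1, thread commands on pass 2, so that all
	 * of the memory a thread's state refers to is mapped before the
	 * state itself is set.
	 */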
	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		offset = sizeof(struct mach_header);
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			offset += lcp->cmdsize;

			/*
			 *	Check for valid lcp pointer by checking
			 *	next offset.
			 */
			if (offset > header->sizeofcmds
					+ sizeof(struct mach_header)) {
#if 0
				vm_map_remove(kernel_map, addr, addr + size);
#endif
				if (kl_addr)
					kfree(kl_addr, kl_size);
				return(LOAD_BADMACHO);
			}
			/*
			 *	Check for valid command.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager, file_offset,
					macho_size,
					(unsigned long)ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
					result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
					(struct thread_command *) lcp,
					result);
				break;
			case LC_LOADFVMLIB:
				if (pass != 1)
					break;
				ret = load_fvmlib((struct fvmlib_command *)lcp,
					map, depth);
				break;
			case LC_IDFVMLIB:
				if (pass != 1)
					break;
				ret = load_idfvmlib(
					(struct fvmlib_command *)lcp,
					lib_version);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if (depth == 1 || dlp == 0)
					dlp = (struct dylinker_command *)lcp;
				else
					ret = LOAD_FAILURE;
				break;
			default:
				ret = KERN_SUCCESS;	/* ignore other stuff */
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS && dlp != 0) {
		vm_offset_t addr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings map_info;
		shared_region_mapping_t next;

RedoLookup:
		vm_get_shared_region(current_task(), &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.flags), &next);

		if((map_info.flags & SHARED_REGION_FULL) &&
			(map_info.flags & SHARED_REGION_SYSTEM)) {
			if(map_info.self != (vm_offset_t)system_shared_region) {
				shared_region_mapping_ref(system_shared_region);
				vm_set_shared_region(current_task(),
						system_shared_region);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
				goto RedoLookup;
			}
		}

		addr = map_info.client_base;
		vm_map(map, &addr, map_info.text_size, 0,
			(VM_MEMORY_SHARED_PMAP << 24)
				| SHARED_LIB_ALIAS,
			map_info.text_region, 0, FALSE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		addr = map_info.client_base + map_info.text_size;
		vm_map(map, &addr, map_info.data_size,
			0, SHARED_LIB_ALIAS,
			map_info.data_region, 0, TRUE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

		ret = load_dylinker(dlp, map, depth, result);
	}
	if (kl_addr)
		kfree(kl_addr, kl_size);
#if 0
	vm_map_remove(kernel_map, addr, addr + size);
#endif
	if ((ret == LOAD_SUCCESS) && (depth == 1) &&
	    (result->thread_count == 0))
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS)
		ubc_map(vp);

	return(ret);
}
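
/*
 * load_segment() establishes one LC_SEGMENT command in the target
 * map: the file-backed portion is mapped through the vnode pager,
 * any leftover past the file contents is zeroed, the remainder of
 * vmsize is zero-fill allocated, and the segment protections are
 * applied.
 */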
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;
	extern int		print_map_addr;
	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);
	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

	map_offset = pager_offset + scp->fileoff;

	initprot = (scp->initprot) & VM_PROT_ALL;
	maxprot = (scp->maxprot) & VM_PROT_ALL;
	/*
	 *	Map a copy of the file into the address space.
	 */
	ret = vm_map(map,
			&map_addr, map_size, (vm_offset_t)0, FALSE,
			pager, map_offset, TRUE,
			initprot, maxprot,
			VM_INHERIT_DEFAULT);
	if (ret != KERN_SUCCESS)
		return(LOAD_NOSPACE);

	if (print_map_addr)
		printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
	/*
	 *	If the file didn't end on a page boundary,
	 *	we need to zero the leftover.
	 */
	delta_size = map_size - scp->filesize;
#if FIXME
	if (delta_size > 0) {
		vm_offset_t	tmp;

		ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
		if (ret != KERN_SUCCESS)
			return(LOAD_RESOURCE);

		if (copyout(tmp, map_addr + scp->filesize,
				delta_size)) {
			(void) vm_deallocate(
					kernel_map, tmp, delta_size);
			return(LOAD_FAILURE);
		}

		(void) vm_deallocate(kernel_map, tmp, delta_size);
	}
#endif	/* FIXME */
	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_allocate(map, &tmp, delta_size, FALSE);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}
	/*
	 *	Set protection values. (Note: ignore errors!)
	 */
	if (scp->maxprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				map_addr, seg_size,
				TRUE, scp->maxprot);
	}
	if (scp->initprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				map_addr, seg_size,
				FALSE, scp->initprot);
	}
	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	return(LOAD_SUCCESS);
}
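
/*
 * load_unixthread() handles LC_UNIXTHREAD, the command that marks a
 * BSD executable: it records the user stack, entry point, and
 * register state for the initial thread.  Only one is allowed per
 * image.
 */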
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	load_result_t		*result
)
{
	thread_t	thread = current_thread();
	load_return_t	ret;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->user_stack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadentry(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return(ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
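
/*
 * load_thread() handles LC_THREAD.  The first thread command applies
 * to the current thread; each subsequent one creates a new thread in
 * the task and resumes it immediately.
 */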
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	load_result_t		*result
)
{
	thread_t	thread;
	kern_return_t	kret;
	load_return_t	lret;

	if (result->thread_count == 0)
		thread = current_thread();
	else {
		kret = thread_create(current_task(), &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(current_thread(),
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack);
		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(current_thread(),
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
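
/*
 * A thread command's state area is a sequence of flavor blocks:
 *
 *	unsigned long	flavor;		machine-dependent state flavor
 *	unsigned long	count;		size of the state in longwords
 *	unsigned long	state[count];	the register values themselves
 *
 * The three helpers below walk that sequence, handing each flavor to
 * the machine-dependent layer (thread_setstatus, thread_userstack,
 * thread_entrypoint).
 */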
static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*user_stack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_userstack(thread, flavor, ts, size, user_stack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 *	Set the thread state.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		total_size -= (size+2)*sizeof(unsigned long);
		if ((long)total_size < 0)
			return(LOAD_BADMACHO);
		ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
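
/*
 * load_fvmlib() handles LC_LOADFVMLIB: the named fixed shared
 * library is looked up and recursively parsed into the same map, and
 * the load fails with LOAD_SHLIB if the library on disk is older
 * than the version the executable was linked against.
 */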
static
load_return_t
load_fvmlib(
	struct fvmlib_command	*lcp,
	vm_map_t		map,
	int			depth
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	unsigned long		lib_version;
	load_result_t		myresult;
	load_return_t		ret;

	name = (char *)lcp + lcp->fvmlib.name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 *	Load the Mach-O.
	 */
	ret = parse_machfile(vp, map, &header,
			file_offset, macho_size,
			depth, &lib_version, &myresult);

	if ((ret == LOAD_SUCCESS) &&
	    (lib_version < lcp->fvmlib.minor_version))
		ret = LOAD_SHLIB;

	vrele(vp);
	return(ret);
}
static
load_return_t
load_idfvmlib(
	struct fvmlib_command	*lcp,
	unsigned long		*version
)
{
	*version = lcp->fvmlib.minor_version;
	return(LOAD_SUCCESS);
}
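
/*
 * load_dylinker() loads the dynamic link editor named by an
 * LC_LOAD_DYLINKER command.  The dylinker is parsed into a private
 * scratch map, then copied into the target map wherever room can be
 * found; if it cannot sit at its preferred address, the recorded
 * entry point is slid by the same offset.
 */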
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	int			depth,
	load_result_t		*result
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	vm_offset_t		dyl_start, map_addr;
	vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 *	Load the Mach-O.
	 */
	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max(map), TRUE);

	ret = parse_machfile(vp, copy_map, &header,
			file_offset, macho_size,
			depth, 0, &myresult);

	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}

		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					map_addr,
					map_addr + dyl_length,
					VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					map_addr,
					map_addr + dyl_length,
					VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);
}
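
/*
 * get_macho_vnode() turns a pathname into an opened vnode that is
 * known to hold a Mach-O image, applying the usual exec permission
 * checks.  For fat binaries it selects the slice matching this
 * machine and returns its file offset and size.
 */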
static
load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr		attr, *atp;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;

	error = KERN_SUCCESS;
	ndp = &nid;
	atp = &attr;

	/* init the namei data to point the file user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | SAVENAME, UIO_SYSSPACE, path, p);

	if (error = namei(ndp))
		return(error);

	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if (error = VOP_GETATTR(vp, &attr, p->p_ucred, p))
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if (error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p))
		goto bad1;
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}
	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, p->p_ucred, p))
		goto bad1;

	if (error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p))
		goto bad2;

	/* XXXX WMG - we should check for a short read of the header here */

	if (header.mach_header.magic == MH_MAGIC)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/*
		 * Look up our architecture in the fat file.
		 */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS) {
			goto bad2;
		}

		/*
		 * Read the Mach-O header out of it
		 */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
		if (error) {
			error = LOAD_FAILURE;
			goto bad2;
		}

		/*
		 * Is this really a Mach-O?
		 */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*mach_header = header.mach_header;
		*file_offset = fat_arch.offset;
		*macho_size = fat_arch.size;
		*vpp = vp;

		/* leaks otherwise - A.R */
		FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);

		/* i_lock exclusive panics, otherwise during pageins */
		VOP_UNLOCK(vp, 0, p);
		return (error);
	} else {
		*mach_header = header.mach_header;
		*file_offset = 0;

		ubc_setsize(vp, attr.va_size);	/* XXX why? */
		*macho_size = attr.va_size;
		*vpp = vp;

		/* leaks otherwise - A.R */
		FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);

		/* i_lock exclusive panics, otherwise during pageins */
		VOP_UNLOCK(vp, 0, p);
		return (error);
	}
bad2:
	/*
	 * unlock and close the vnode, restore the old one, free the
	 * pathname buf, and punt.
	 */
	VOP_UNLOCK(vp, 0, p);
	vn_close(vp, FREAD, p->p_ucred, p);
	FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);
	vput(vp);
	return (error);
}