2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
24 * Copyright (c) 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * External virtual filesystem routines
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/proc_internal.h>
73 #include <sys/kauth.h>
74 #include <sys/mount.h>
75 #include <sys/mount_internal.h>
77 #include <sys/vnode_internal.h>
79 #include <sys/namei.h>
80 #include <sys/ucred.h>
82 #include <sys/errno.h>
83 #include <sys/malloc.h>
84 #include <sys/domain.h>
86 #include <sys/syslog.h>
89 #include <sys/sysctl.h>
90 #include <sys/filedesc.h>
91 #include <sys/fsevents.h>
93 #include <sys/lockf.h>
94 #include <sys/xattr.h>
96 #include <kern/assert.h>
97 #include <kern/kalloc.h>
99 #include <miscfs/specfs/specdev.h>
101 #include <mach/mach_types.h>
102 #include <mach/memory_object_types.h>
111 #define THREAD_SAFE_FS(VP) \
112 ((VP)->v_unsafefs ? 0 : 1)
114 #define NATIVE_XATTR(VP) \
115 ((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)
117 static void xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t context
,
118 int thread_safe
, int force
);
119 static void xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
120 vfs_context_t context
, int thread_safe
);
124 vnode_setneedinactive(vnode_t vp
)
129 vp
->v_lflag
|= VL_NEEDINACTIVE
;
135 lock_fsnode(vnode_t vp
, int *funnel_state
)
138 *funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
140 if (vp
->v_unsafefs
) {
141 if (vp
->v_unsafefs
->fsnodeowner
== current_thread()) {
142 vp
->v_unsafefs
->fsnode_count
++;
144 lck_mtx_lock(&vp
->v_unsafefs
->fsnodelock
);
146 if (vp
->v_lflag
& (VL_TERMWANT
| VL_TERMINATE
| VL_DEAD
)) {
147 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
150 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
153 vp
->v_unsafefs
->fsnodeowner
= current_thread();
154 vp
->v_unsafefs
->fsnode_count
= 1;
162 unlock_fsnode(vnode_t vp
, int *funnel_state
)
164 if (vp
->v_unsafefs
) {
165 if (--vp
->v_unsafefs
->fsnode_count
== 0) {
166 vp
->v_unsafefs
->fsnodeowner
= NULL
;
167 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
171 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
176 /* ====================================================================== */
177 /* ************ EXTERNAL KERNEL APIS ********************************** */
178 /* ====================================================================== */
181 * prototypes for exported VFS operations
184 VFS_MOUNT(struct mount
* mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t context
)
188 int funnel_state
= 0;
190 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_mount
== 0))
193 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
197 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
200 if (vfs_context_is64bit(context
)) {
201 if (vfs_64bitready(mp
)) {
202 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, context
);
209 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, context
);
213 (void) thread_funnel_set(kernel_flock
, funnel_state
);
219 VFS_START(struct mount
* mp
, int flags
, vfs_context_t context
)
223 int funnel_state
= 0;
225 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_start
== 0))
228 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
231 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
233 error
= (*mp
->mnt_op
->vfs_start
)(mp
, flags
, context
);
235 (void) thread_funnel_set(kernel_flock
, funnel_state
);
241 VFS_UNMOUNT(struct mount
*mp
, int flags
, vfs_context_t context
)
245 int funnel_state
= 0;
247 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_unmount
== 0))
250 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
253 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
255 error
= (*mp
->mnt_op
->vfs_unmount
)(mp
, flags
, context
);
257 (void) thread_funnel_set(kernel_flock
, funnel_state
);
263 VFS_ROOT(struct mount
* mp
, struct vnode
** vpp
, vfs_context_t context
)
267 int funnel_state
= 0;
268 struct vfs_context acontext
;
270 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_root
== 0))
273 if (context
== NULL
) {
274 acontext
.vc_proc
= current_proc();
275 acontext
.vc_ucred
= kauth_cred_get();
278 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
281 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
283 error
= (*mp
->mnt_op
->vfs_root
)(mp
, vpp
, context
);
285 (void) thread_funnel_set(kernel_flock
, funnel_state
);
291 VFS_QUOTACTL(struct mount
*mp
, int cmd
, uid_t uid
, caddr_t datap
, vfs_context_t context
)
295 int funnel_state
= 0;
297 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_quotactl
== 0))
300 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
303 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
305 error
= (*mp
->mnt_op
->vfs_quotactl
)(mp
, cmd
, uid
, datap
, context
);
307 (void) thread_funnel_set(kernel_flock
, funnel_state
);
313 VFS_GETATTR(struct mount
*mp
, struct vfs_attr
*vfa
, vfs_context_t context
)
317 int funnel_state
= 0;
318 struct vfs_context acontext
;
320 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_getattr
== 0))
323 if (context
== NULL
) {
324 acontext
.vc_proc
= current_proc();
325 acontext
.vc_ucred
= kauth_cred_get();
328 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
331 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
333 error
= (*mp
->mnt_op
->vfs_getattr
)(mp
, vfa
, context
);
335 (void) thread_funnel_set(kernel_flock
, funnel_state
);
341 VFS_SETATTR(struct mount
*mp
, struct vfs_attr
*vfa
, vfs_context_t context
)
345 int funnel_state
= 0;
346 struct vfs_context acontext
;
348 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_setattr
== 0))
351 if (context
== NULL
) {
352 acontext
.vc_proc
= current_proc();
353 acontext
.vc_ucred
= kauth_cred_get();
356 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
359 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
361 error
= (*mp
->mnt_op
->vfs_setattr
)(mp
, vfa
, context
);
363 (void) thread_funnel_set(kernel_flock
, funnel_state
);
369 VFS_SYNC(struct mount
*mp
, int flags
, vfs_context_t context
)
373 int funnel_state
= 0;
374 struct vfs_context acontext
;
376 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_sync
== 0))
379 if (context
== NULL
) {
380 acontext
.vc_proc
= current_proc();
381 acontext
.vc_ucred
= kauth_cred_get();
384 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
387 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
389 error
= (*mp
->mnt_op
->vfs_sync
)(mp
, flags
, context
);
391 (void) thread_funnel_set(kernel_flock
, funnel_state
);
397 VFS_VGET(struct mount
* mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t context
)
401 int funnel_state
= 0;
402 struct vfs_context acontext
;
404 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_vget
== 0))
407 if (context
== NULL
) {
408 acontext
.vc_proc
= current_proc();
409 acontext
.vc_ucred
= kauth_cred_get();
412 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
415 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
417 error
= (*mp
->mnt_op
->vfs_vget
)(mp
, ino
, vpp
, context
);
419 (void) thread_funnel_set(kernel_flock
, funnel_state
);
425 VFS_FHTOVP(struct mount
* mp
, int fhlen
, unsigned char * fhp
, vnode_t
* vpp
, vfs_context_t context
)
429 int funnel_state
= 0;
430 struct vfs_context acontext
;
432 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_fhtovp
== 0))
435 if (context
== NULL
) {
436 acontext
.vc_proc
= current_proc();
437 acontext
.vc_ucred
= kauth_cred_get();
440 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
443 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
445 error
= (*mp
->mnt_op
->vfs_fhtovp
)(mp
, fhlen
, fhp
, vpp
, context
);
447 (void) thread_funnel_set(kernel_flock
, funnel_state
);
453 VFS_VPTOFH(struct vnode
* vp
, int *fhlenp
, unsigned char * fhp
, vfs_context_t context
)
457 int funnel_state
= 0;
458 struct vfs_context acontext
;
460 if ((vp
->v_mount
== dead_mountp
) || (vp
->v_mount
->mnt_op
->vfs_vptofh
== 0))
463 if (context
== NULL
) {
464 acontext
.vc_proc
= current_proc();
465 acontext
.vc_ucred
= kauth_cred_get();
468 thread_safe
= THREAD_SAFE_FS(vp
);
471 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
473 error
= (*vp
->v_mount
->mnt_op
->vfs_vptofh
)(vp
, fhlenp
, fhp
, context
);
475 (void) thread_funnel_set(kernel_flock
, funnel_state
);
481 /* returns a copy of vfs type name for the mount_t */
483 vfs_name(mount_t mp
, char * buffer
)
485 strncpy(buffer
, mp
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
488 /* returns vfs type number for the mount_t */
490 vfs_typenum(mount_t mp
)
492 return(mp
->mnt_vtable
->vfc_typenum
);
496 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
498 vfs_flags(mount_t mp
)
500 return((uint64_t)(mp
->mnt_flag
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
)));
503 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
505 vfs_setflags(mount_t mp
, uint64_t flags
)
507 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
509 mp
->mnt_flag
|= lflags
;
512 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
514 vfs_clearflags(mount_t mp
, uint64_t flags
)
516 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
518 mp
->mnt_flag
&= ~lflags
;
521 /* Is the mount_t ronly and upgrade read/write requested? */
523 vfs_iswriteupgrade(mount_t mp
) /* ronly && MNTK_WANTRDWR */
525 return ((mp
->mnt_flag
& MNT_RDONLY
) && (mp
->mnt_kern_flag
& MNTK_WANTRDWR
));
529 /* Is the mount_t mounted ronly */
531 vfs_isrdonly(mount_t mp
)
533 return (mp
->mnt_flag
& MNT_RDONLY
);
536 /* Is the mount_t mounted for filesystem synchronous writes? */
538 vfs_issynchronous(mount_t mp
)
540 return (mp
->mnt_flag
& MNT_SYNCHRONOUS
);
543 /* Is the mount_t mounted read/write? */
545 vfs_isrdwr(mount_t mp
)
547 return ((mp
->mnt_flag
& MNT_RDONLY
) == 0);
551 /* Is mount_t marked for update (ie MNT_UPDATE) */
553 vfs_isupdate(mount_t mp
)
555 return (mp
->mnt_flag
& MNT_UPDATE
);
559 /* Is mount_t marked for reload (ie MNT_RELOAD) */
561 vfs_isreload(mount_t mp
)
563 return ((mp
->mnt_flag
& MNT_UPDATE
) && (mp
->mnt_flag
& MNT_RELOAD
));
566 /* Is mount_t marked for reload (ie MNT_FORCE) */
568 vfs_isforce(mount_t mp
)
570 if ((mp
->mnt_flag
& MNT_FORCE
) || (mp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
))
577 vfs_64bitready(mount_t mp
)
579 if ((mp
->mnt_vtable
->vfc_64bitready
))
586 vfs_authopaque(mount_t mp
)
588 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
))
595 vfs_authopaqueaccess(mount_t mp
)
597 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE_ACCESS
))
604 vfs_setauthopaque(mount_t mp
)
607 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE
;
612 vfs_setauthopaqueaccess(mount_t mp
)
615 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE_ACCESS
;
620 vfs_clearauthopaque(mount_t mp
)
623 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE
;
628 vfs_clearauthopaqueaccess(mount_t mp
)
631 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE_ACCESS
;
636 vfs_setextendedsecurity(mount_t mp
)
639 mp
->mnt_kern_flag
|= MNTK_EXTENDED_SECURITY
;
644 vfs_clearextendedsecurity(mount_t mp
)
647 mp
->mnt_kern_flag
&= ~MNTK_EXTENDED_SECURITY
;
652 vfs_extendedsecurity(mount_t mp
)
654 return(mp
->mnt_kern_flag
& MNTK_EXTENDED_SECURITY
);
657 /* returns the max size of short symlink in this mount_t */
659 vfs_maxsymlen(mount_t mp
)
661 return(mp
->mnt_maxsymlinklen
);
664 /* set max size of short symlink on mount_t */
666 vfs_setmaxsymlen(mount_t mp
, uint32_t symlen
)
668 mp
->mnt_maxsymlinklen
= symlen
;
671 /* return a pointer to the RO vfs_statfs associated with mount_t */
673 vfs_statfs(mount_t mp
)
675 return(&mp
->mnt_vfsstat
);
679 vfs_getattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
684 if ((error
= VFS_GETATTR(mp
, vfa
, ctx
)) != 0)
688 * If we have a filesystem create time, use it to default some others.
690 if (VFSATTR_IS_SUPPORTED(vfa
, f_create_time
)) {
691 if (VFSATTR_IS_ACTIVE(vfa
, f_modify_time
) && !VFSATTR_IS_SUPPORTED(vfa
, f_modify_time
))
692 VFSATTR_RETURN(vfa
, f_modify_time
, vfa
->f_create_time
);
699 vfs_setattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
703 if (vfs_isrdonly(mp
))
706 error
= VFS_SETATTR(mp
, vfa
, ctx
);
709 * If we had alternate ways of setting vfs attributes, we'd
716 /* return the private data handle stored in mount_t */
718 vfs_fsprivate(mount_t mp
)
720 return(mp
->mnt_data
);
723 /* set the private data handle in mount_t */
725 vfs_setfsprivate(mount_t mp
, void *mntdata
)
727 mp
->mnt_data
= mntdata
;
732 * return the block size of the underlying
733 * device associated with mount_t
736 vfs_devblocksize(mount_t mp
) {
738 return(mp
->mnt_devblocksize
);
743 * return the io attributes associated with mount_t
746 vfs_ioattr(mount_t mp
, struct vfsioattr
*ioattrp
)
749 ioattrp
->io_maxreadcnt
= MAXPHYS
;
750 ioattrp
->io_maxwritecnt
= MAXPHYS
;
751 ioattrp
->io_segreadcnt
= 32;
752 ioattrp
->io_segwritecnt
= 32;
753 ioattrp
->io_maxsegreadsize
= MAXPHYS
;
754 ioattrp
->io_maxsegwritesize
= MAXPHYS
;
755 ioattrp
->io_devblocksize
= DEV_BSIZE
;
757 ioattrp
->io_maxreadcnt
= mp
->mnt_maxreadcnt
;
758 ioattrp
->io_maxwritecnt
= mp
->mnt_maxwritecnt
;
759 ioattrp
->io_segreadcnt
= mp
->mnt_segreadcnt
;
760 ioattrp
->io_segwritecnt
= mp
->mnt_segwritecnt
;
761 ioattrp
->io_maxsegreadsize
= mp
->mnt_maxsegreadsize
;
762 ioattrp
->io_maxsegwritesize
= mp
->mnt_maxsegwritesize
;
763 ioattrp
->io_devblocksize
= mp
->mnt_devblocksize
;
765 ioattrp
->io_reserved
[0] = 0;
766 ioattrp
->io_reserved
[1] = 0;
767 ioattrp
->io_reserved
[2] = 0;
772 * set the IO attributes associated with mount_t
775 vfs_setioattr(mount_t mp
, struct vfsioattr
* ioattrp
)
779 mp
->mnt_maxreadcnt
= ioattrp
->io_maxreadcnt
;
780 mp
->mnt_maxwritecnt
= ioattrp
->io_maxwritecnt
;
781 mp
->mnt_segreadcnt
= ioattrp
->io_segreadcnt
;
782 mp
->mnt_segwritecnt
= ioattrp
->io_segwritecnt
;
783 mp
->mnt_maxsegreadsize
= ioattrp
->io_maxsegreadsize
;
784 mp
->mnt_maxsegwritesize
= ioattrp
->io_maxsegwritesize
;
785 mp
->mnt_devblocksize
= ioattrp
->io_devblocksize
;
789 * Add a new filesystem into the kernel specified in passed in
790 * vfstable structure. It fills in the vnode
791 * dispatch vector that is to be passed to when vnodes are created.
792 * It returns a handle which is to be used to when the FS is to be removed
794 typedef int (*PFI
)(void *);
795 extern int vfs_opv_numops
;
797 vfs_fsadd(struct vfs_fsentry
*vfe
, vfstable_t
* handle
)
800 struct vfstable
*newvfstbl
= NULL
;
802 int (***opv_desc_vector_p
)(void *);
803 int (**opv_desc_vector
)(void *);
804 struct vnodeopv_entry_desc
*opve_descp
;
810 * This routine is responsible for all the initialization that would
811 * ordinarily be done as part of the system startup;
814 if (vfe
== (struct vfs_fsentry
*)0)
817 desccount
= vfe
->vfe_vopcnt
;
818 if ((desccount
<=0) || ((desccount
> 5)) || (vfe
->vfe_vfsops
== (struct vfsops
*)NULL
)
819 || (vfe
->vfe_opvdescs
== (struct vnodeopv_desc
**)NULL
))
823 MALLOC(newvfstbl
, void *, sizeof(struct vfstable
), M_TEMP
,
825 bzero(newvfstbl
, sizeof(struct vfstable
));
826 newvfstbl
->vfc_vfsops
= vfe
->vfe_vfsops
;
827 strncpy(&newvfstbl
->vfc_name
[0], vfe
->vfe_fsname
, MFSNAMELEN
);
828 if ((vfe
->vfe_flags
& VFS_TBLNOTYPENUM
))
829 newvfstbl
->vfc_typenum
= maxvfsconf
++;
831 newvfstbl
->vfc_typenum
= vfe
->vfe_fstypenum
;
833 newvfstbl
->vfc_refcount
= 0;
834 newvfstbl
->vfc_flags
= 0;
835 newvfstbl
->vfc_mountroot
= NULL
;
836 newvfstbl
->vfc_next
= NULL
;
837 newvfstbl
->vfc_threadsafe
= 0;
838 newvfstbl
->vfc_vfsflags
= 0;
839 if (vfe
->vfe_flags
& VFS_TBL64BITREADY
)
840 newvfstbl
->vfc_64bitready
= 1;
841 if (vfe
->vfe_flags
& VFS_TBLTHREADSAFE
)
842 newvfstbl
->vfc_threadsafe
= 1;
843 if (vfe
->vfe_flags
& VFS_TBLFSNODELOCK
)
844 newvfstbl
->vfc_threadsafe
= 1;
845 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) == VFS_TBLLOCALVOL
)
846 newvfstbl
->vfc_flags
|= MNT_LOCAL
;
847 if (vfe
->vfe_flags
& VFS_TBLLOCALVOL
)
848 newvfstbl
->vfc_vfsflags
|= VFC_VFSLOCALARGS
;
850 newvfstbl
->vfc_vfsflags
|= VFC_VFSGENERICARGS
;
854 * Allocate and init the vectors.
855 * Also handle backwards compatibility.
857 * We allocate one large block to hold all <desccount>
858 * vnode operation vectors stored contiguously.
860 /* XXX - shouldn't be M_TEMP */
862 descsize
= desccount
* vfs_opv_numops
* sizeof(PFI
);
863 MALLOC(descptr
, PFI
*, descsize
,
865 bzero(descptr
, descsize
);
867 newvfstbl
->vfc_descptr
= descptr
;
868 newvfstbl
->vfc_descsize
= descsize
;
871 for (i
= 0; i
< desccount
; i
++ ) {
872 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
874 * Fill in the caller's pointer to the start of the i'th vector.
875 * They'll need to supply it when calling vnode_create.
877 opv_desc_vector
= descptr
+ i
* vfs_opv_numops
;
878 *opv_desc_vector_p
= opv_desc_vector
;
880 for (j
= 0; vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
881 opve_descp
= &(vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
]);
884 * Sanity check: is this operation listed
885 * in the list of operations? We check this
886 * by seeing if its offest is zero. Since
887 * the default routine should always be listed
888 * first, it should be the only one with a zero
889 * offset. Any other operation with a zero
890 * offset is probably not listed in
891 * vfs_op_descs, and so is probably an error.
893 * A panic here means the layer programmer
894 * has committed the all-too common bug
895 * of adding a new operation to the layer's
896 * list of vnode operations but
897 * not adding the operation to the system-wide
898 * list of supported operations.
900 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
901 opve_descp
->opve_op
->vdesc_offset
!= VOFFSET(vnop_default
)) {
902 printf("vfs_fsadd: operation %s not listed in %s.\n",
903 opve_descp
->opve_op
->vdesc_name
,
905 panic("vfs_fsadd: bad operation");
908 * Fill in this entry.
910 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
911 opve_descp
->opve_impl
;
916 * Finally, go back and replace unfilled routines
917 * with their default. (Sigh, an O(n^3) algorithm. I
918 * could make it better, but that'd be work, and n is small.)
920 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
923 * Force every operations vector to have a default routine.
925 opv_desc_vector
= *opv_desc_vector_p
;
926 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
)
927 panic("vfs_fsadd: operation vector without default routine.");
928 for (j
= 0; j
< vfs_opv_numops
; j
++)
929 if (opv_desc_vector
[j
] == NULL
)
931 opv_desc_vector
[VOFFSET(vnop_default
)];
933 } /* end of each vnodeopv_desc parsing */
937 *handle
= vfstable_add(newvfstbl
);
939 if (newvfstbl
->vfc_typenum
<= maxvfsconf
)
940 maxvfsconf
= newvfstbl
->vfc_typenum
+ 1;
943 if (newvfstbl
->vfc_vfsops
->vfs_init
)
944 (*newvfstbl
->vfc_vfsops
->vfs_init
)((struct vfsconf
*)handle
);
946 FREE(newvfstbl
, M_TEMP
);
952 * Removes the filesystem from kernel.
953 * The argument passed in is the handle that was given when
954 * file system was added
957 vfs_fsremove(vfstable_t handle
)
959 struct vfstable
* vfstbl
= (struct vfstable
*)handle
;
960 void *old_desc
= NULL
;
963 /* Preflight check for any mounts */
965 if ( vfstbl
->vfc_refcount
!= 0 ) {
972 * save the old descriptor; the free cannot occur unconditionally,
973 * since vfstable_del() may fail.
975 if (vfstbl
->vfc_descptr
&& vfstbl
->vfc_descsize
) {
976 old_desc
= vfstbl
->vfc_descptr
;
978 err
= vfstable_del(vfstbl
);
980 /* free the descriptor if the delete was successful */
981 if (err
== 0 && old_desc
) {
982 FREE(old_desc
, M_TEMP
);
989 * This returns a reference to mount_t
990 * which should be dropped using vfs_mountrele().
991 * Not doing so will leak a mountpoint
992 * and associated data structures.
995 vfs_mountref(__unused mount_t mp
) /* gives a reference */
1000 /* This drops the reference on mount_t that was acquired */
1002 vfs_mountrele(__unused mount_t mp
) /* drops reference */
1008 vfs_context_pid(vfs_context_t context
)
1010 return (context
->vc_proc
->p_pid
);
1014 vfs_context_suser(vfs_context_t context
)
1016 return (suser(context
->vc_ucred
, 0));
1019 vfs_context_issignal(vfs_context_t context
, sigset_t mask
)
1021 if (context
->vc_proc
)
1022 return(proc_pendingsignals(context
->vc_proc
, mask
));
1027 vfs_context_is64bit(vfs_context_t context
)
1029 if (context
->vc_proc
)
1030 return(proc_is64bit(context
->vc_proc
));
1035 vfs_context_proc(vfs_context_t context
)
1037 return (context
->vc_proc
);
1041 vfs_context_create(vfs_context_t context
)
1043 struct vfs_context
* newcontext
;
1045 newcontext
= (struct vfs_context
*)kalloc(sizeof(struct vfs_context
));
1049 newcontext
->vc_proc
= context
->vc_proc
;
1050 newcontext
->vc_ucred
= context
->vc_ucred
;
1052 newcontext
->vc_proc
= proc_self();
1053 newcontext
->vc_ucred
= kauth_cred_get();
1057 return((vfs_context_t
)0);
1061 vfs_context_rele(vfs_context_t context
)
1064 kfree(context
, sizeof(struct vfs_context
));
1070 vfs_context_ucred(vfs_context_t context
)
1072 return (context
->vc_ucred
);
1076 * Return true if the context is owned by the superuser.
1079 vfs_context_issuser(vfs_context_t context
)
1081 return(context
->vc_ucred
->cr_uid
== 0);
1085 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1089 * Convert between vnode types and inode formats (since POSIX.1
1090 * defines mode word of stat structure in terms of inode formats).
1093 vnode_iftovt(int mode
)
1095 return(iftovt_tab
[((mode
) & S_IFMT
) >> 12]);
1099 vnode_vttoif(enum vtype indx
)
1101 return(vttoif_tab
[(int)(indx
)]);
/* Combine a vnode type and permission bits into an inode-format mode. */
int
vnode_makeimode(int indx, int mode)
{
        return (int)(VTTOIF(indx) | mode);
}
1112 * vnode manipulation functions.
1115 /* returns system root vnode reference; It should be dropped using vrele() */
1121 error
= vnode_get(rootvnode
);
1123 return ((vnode_t
)0);
1130 vnode_vid(vnode_t vp
)
1132 return ((uint32_t)(vp
->v_id
));
1135 /* returns a mount reference; drop it with vfs_mountrelease() */
1137 vnode_mount(vnode_t vp
)
1139 return (vp
->v_mount
);
1142 /* returns a mount reference iff vnode_t is a dir and is a mount point */
1144 vnode_mountedhere(vnode_t vp
)
1148 if ((vp
->v_type
== VDIR
) && ((mp
= vp
->v_mountedhere
) != NULL
) &&
1149 (mp
->mnt_vnodecovered
== vp
))
1152 return (mount_t
)NULL
;
1155 /* returns vnode type of vnode_t */
1157 vnode_vtype(vnode_t vp
)
1159 return (vp
->v_type
);
1162 /* returns FS specific node saved in vnode */
1164 vnode_fsnode(vnode_t vp
)
1166 return (vp
->v_data
);
1170 vnode_clearfsnode(vnode_t vp
)
1176 vnode_specrdev(vnode_t vp
)
1182 /* Accessor functions */
1183 /* is vnode_t a root vnode */
1185 vnode_isvroot(vnode_t vp
)
1187 return ((vp
->v_flag
& VROOT
)? 1 : 0);
1190 /* is vnode_t a system vnode */
1192 vnode_issystem(vnode_t vp
)
1194 return ((vp
->v_flag
& VSYSTEM
)? 1 : 0);
1197 /* if vnode_t mount operation in progress */
1199 vnode_ismount(vnode_t vp
)
1201 return ((vp
->v_flag
& VMOUNT
)? 1 : 0);
1204 /* is this vnode under recyle now */
1206 vnode_isrecycled(vnode_t vp
)
1211 ret
= (vp
->v_lflag
& (VL_TERMINATE
|VL_DEAD
))? 1 : 0;
1216 /* is vnode_t marked to not keep data cached once it's been consumed */
1218 vnode_isnocache(vnode_t vp
)
1220 return ((vp
->v_flag
& VNOCACHE_DATA
)? 1 : 0);
1224 * has sequential readahead been disabled on this vnode
1227 vnode_isnoreadahead(vnode_t vp
)
1229 return ((vp
->v_flag
& VRAOFF
)? 1 : 0);
1232 /* is vnode_t a standard one? */
1234 vnode_isstandard(vnode_t vp
)
1236 return ((vp
->v_flag
& VSTANDARD
)? 1 : 0);
1239 /* don't vflush() if SKIPSYSTEM */
1241 vnode_isnoflush(vnode_t vp
)
1243 return ((vp
->v_flag
& VNOFLUSH
)? 1 : 0);
1246 /* is vnode_t a regular file */
1248 vnode_isreg(vnode_t vp
)
1250 return ((vp
->v_type
== VREG
)? 1 : 0);
1253 /* is vnode_t a directory? */
1255 vnode_isdir(vnode_t vp
)
1257 return ((vp
->v_type
== VDIR
)? 1 : 0);
1260 /* is vnode_t a symbolic link ? */
1262 vnode_islnk(vnode_t vp
)
1264 return ((vp
->v_type
== VLNK
)? 1 : 0);
1267 /* is vnode_t a fifo ? */
1269 vnode_isfifo(vnode_t vp
)
1271 return ((vp
->v_type
== VFIFO
)? 1 : 0);
1274 /* is vnode_t a block device? */
1276 vnode_isblk(vnode_t vp
)
1278 return ((vp
->v_type
== VBLK
)? 1 : 0);
1281 /* is vnode_t a char device? */
1283 vnode_ischr(vnode_t vp
)
1285 return ((vp
->v_type
== VCHR
)? 1 : 0);
1288 /* is vnode_t a socket? */
1290 vnode_issock(vnode_t vp
)
1292 return ((vp
->v_type
== VSOCK
)? 1 : 0);
1296 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1298 vnode_setnocache(vnode_t vp
)
1301 vp
->v_flag
|= VNOCACHE_DATA
;
1306 vnode_clearnocache(vnode_t vp
)
1309 vp
->v_flag
&= ~VNOCACHE_DATA
;
1314 vnode_setnoreadahead(vnode_t vp
)
1317 vp
->v_flag
|= VRAOFF
;
1322 vnode_clearnoreadahead(vnode_t vp
)
1325 vp
->v_flag
&= ~VRAOFF
;
1330 /* mark vnode_t to skip vflush() is SKIPSYSTEM */
1332 vnode_setnoflush(vnode_t vp
)
1335 vp
->v_flag
|= VNOFLUSH
;
1340 vnode_clearnoflush(vnode_t vp
)
1343 vp
->v_flag
&= ~VNOFLUSH
;
1348 /* is vnode_t a blkdevice and has a FS mounted on it */
1350 vnode_ismountedon(vnode_t vp
)
1352 return ((vp
->v_specflags
& SI_MOUNTEDON
)? 1 : 0);
1356 vnode_setmountedon(vnode_t vp
)
1359 vp
->v_specflags
|= SI_MOUNTEDON
;
1364 vnode_clearmountedon(vnode_t vp
)
1367 vp
->v_specflags
&= ~SI_MOUNTEDON
;
1373 vnode_settag(vnode_t vp
, int tag
)
1380 vnode_tag(vnode_t vp
)
1386 vnode_parent(vnode_t vp
)
1389 return(vp
->v_parent
);
1393 vnode_setparent(vnode_t vp
, vnode_t dvp
)
1399 vnode_name(vnode_t vp
)
1401 /* we try to keep v_name a reasonable name for the node */
1406 vnode_setname(vnode_t vp
, char * name
)
1411 /* return the registered FS name when adding the FS to kernel */
1413 vnode_vfsname(vnode_t vp
, char * buf
)
1415 strncpy(buf
, vp
->v_mount
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
1418 /* return the FS type number */
1420 vnode_vfstypenum(vnode_t vp
)
1422 return(vp
->v_mount
->mnt_vtable
->vfc_typenum
);
1426 vnode_vfs64bitready(vnode_t vp
)
1429 if ((vp
->v_mount
->mnt_vtable
->vfc_64bitready
))
1437 /* return the visible flags on associated mount point of vnode_t */
1439 vnode_vfsvisflags(vnode_t vp
)
1441 return(vp
->v_mount
->mnt_flag
& MNT_VISFLAGMASK
);
1444 /* return the command modifier flags on associated mount point of vnode_t */
1446 vnode_vfscmdflags(vnode_t vp
)
1448 return(vp
->v_mount
->mnt_flag
& MNT_CMDFLAGS
);
1451 /* return the max symlink of short links of vnode_t */
1453 vnode_vfsmaxsymlen(vnode_t vp
)
1455 return(vp
->v_mount
->mnt_maxsymlinklen
);
1458 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1460 vnode_vfsstatfs(vnode_t vp
)
1462 return(&vp
->v_mount
->mnt_vfsstat
);
1465 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1467 vnode_vfsfsprivate(vnode_t vp
)
1469 return(vp
->v_mount
->mnt_data
);
1472 /* is vnode_t in a rdonly mounted FS */
1474 vnode_vfsisrdonly(vnode_t vp
)
1476 return ((vp
->v_mount
->mnt_flag
& MNT_RDONLY
)? 1 : 0);
1480 /* returns vnode ref to current working directory */
1482 current_workingdir(void)
1484 struct proc
*p
= current_proc();
1487 if ( (vp
= p
->p_fd
->fd_cdir
) ) {
1488 if ( (vnode_getwithref(vp
)) )
1494 /* returns vnode ref to current root(chroot) directory */
1496 current_rootdir(void)
1498 struct proc
*p
= current_proc();
1501 if ( (vp
= p
->p_fd
->fd_rdir
) ) {
1502 if ( (vnode_getwithref(vp
)) )
1509 vnode_get_filesec(vnode_t vp
, kauth_filesec_t
*fsecp
, vfs_context_t ctx
)
1511 kauth_filesec_t fsec
;
1514 size_t xsize
, rsize
;
1521 /* find out how big the EA is */
1522 if (vn_getxattr(vp
, KAUTH_FILESEC_XATTR
, NULL
, &xsize
, XATTR_NOSECURITY
, ctx
) != 0) {
1523 /* no EA, no filesec */
1524 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1526 /* either way, we are done */
1530 /* how many entries would fit? */
1531 fsec_size
= KAUTH_FILESEC_COUNT(xsize
);
1533 /* get buffer and uio */
1534 if (((fsec
= kauth_filesec_alloc(fsec_size
)) == NULL
) ||
1535 ((fsec_uio
= uio_create(1, 0, UIO_SYSSPACE
, UIO_READ
)) == NULL
) ||
1536 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), xsize
)) {
1537 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1542 /* read security attribute */
1544 if ((error
= vn_getxattr(vp
,
1545 KAUTH_FILESEC_XATTR
,
1551 /* no attribute - no security data */
1552 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1554 /* either way, we are done */
1559 * Validate security structure. If it's corrupt, we will
1562 if (rsize
< KAUTH_FILESEC_SIZE(0)) {
1563 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize
);
1566 if (fsec
->fsec_magic
!= KAUTH_FILESEC_MAGIC
) {
1567 KAUTH_DEBUG("ACL - BAD MAGIC %x", fsec
->fsec_magic
);
1570 if ((fsec
->fsec_acl
.acl_entrycount
!= KAUTH_FILESEC_NOACL
) &&
1571 (fsec
->fsec_acl
.acl_entrycount
> KAUTH_ACL_MAX_ENTRIES
)) {
1572 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", fsec
->fsec_entrycount
);
1575 if ((fsec
->fsec_acl
.acl_entrycount
!= KAUTH_FILESEC_NOACL
) &&
1576 (KAUTH_FILESEC_SIZE(fsec
->fsec_acl
.acl_entrycount
) > rsize
)) {
1577 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", fsec
->fsec_acl
.acl_entrycount
, rsize
);
1586 kauth_filesec_free(fsec
);
1587 if (fsec_uio
!= NULL
)
1595 vnode_set_filesec(vnode_t vp
, kauth_filesec_t fsec
, kauth_acl_t acl
, vfs_context_t ctx
)
1602 if ((fsec_uio
= uio_create(2, 0, UIO_SYSSPACE
, UIO_WRITE
)) == NULL
) {
1603 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
1607 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), sizeof(struct kauth_filesec
) - sizeof(struct kauth_acl
));
1608 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(acl
), KAUTH_ACL_COPYSIZE(acl
));
1609 error
= vn_setxattr(vp
,
1610 KAUTH_FILESEC_XATTR
,
1612 XATTR_NOSECURITY
, /* we have auth'ed already */
1614 VFS_DEBUG(ctx
, vp
, "SETATTR - set ACL returning %d", error
);
1617 if (fsec_uio
!= NULL
)
1624 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1626 kauth_filesec_t fsec
;
1632 /* don't ask for extended security data if the filesystem doesn't support it */
1633 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
1634 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
1635 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1636 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1640 * If the caller wants size values we might have to synthesise, give the
1641 * filesystem the opportunity to supply better intermediate results.
1643 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
1644 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
1645 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
1646 VATTR_SET_ACTIVE(vap
, va_data_size
);
1647 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
1648 VATTR_SET_ACTIVE(vap
, va_total_size
);
1649 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
1652 error
= VNOP_GETATTR(vp
, vap
, ctx
);
1654 KAUTH_DEBUG("ERROR - returning %d", error
);
1659 * If extended security data was requested but not returned, try the fallback
1662 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
1665 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
1666 /* try to get the filesec */
1667 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
1670 /* if no filesec, no attributes */
1672 VATTR_RETURN(vap
, va_acl
, NULL
);
1673 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
1674 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
1677 /* looks good, try to return what we were asked for */
1678 VATTR_RETURN(vap
, va_uuuid
, fsec
->fsec_owner
);
1679 VATTR_RETURN(vap
, va_guuid
, fsec
->fsec_group
);
1681 /* only return the ACL if we were actually asked for it */
1682 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
1683 if (fsec
->fsec_acl
.acl_entrycount
== KAUTH_FILESEC_NOACL
) {
1684 VATTR_RETURN(vap
, va_acl
, NULL
);
1686 facl
= kauth_acl_alloc(fsec
->fsec_acl
.acl_entrycount
);
1688 kauth_filesec_free(fsec
);
1692 bcopy(&fsec
->fsec_acl
, facl
, KAUTH_ACL_COPYSIZE(&fsec
->fsec_acl
));
1693 VATTR_RETURN(vap
, va_acl
, facl
);
1696 kauth_filesec_free(fsec
);
1700 * If someone gave us an unsolicited filesec, toss it. We promise that
1701 * we're OK with a filesystem giving us anything back, but our callers
1702 * only expect what they asked for.
1704 if (VATTR_IS_SUPPORTED(vap
, va_acl
) && !VATTR_IS_ACTIVE(vap
, va_acl
)) {
1705 if (vap
->va_acl
!= NULL
)
1706 kauth_acl_free(vap
->va_acl
);
1707 VATTR_CLEAR_SUPPORTED(vap
, va_acl
);
1710 #if 0 /* enable when we have a filesystem only supporting UUIDs */
1712 * Handle the case where we need a UID/GID, but only have extended
1713 * security information.
1715 if (VATTR_NOT_RETURNED(vap
, va_uid
) &&
1716 VATTR_IS_SUPPORTED(vap
, va_uuuid
) &&
1717 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
1718 if ((error
= kauth_cred_guid2uid(&vap
->va_uuuid
, &nuid
)) == 0)
1719 VATTR_RETURN(vap
, va_uid
, nuid
);
1721 if (VATTR_NOT_RETURNED(vap
, va_gid
) &&
1722 VATTR_IS_SUPPORTED(vap
, va_guuid
) &&
1723 !kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
1724 if ((error
= kauth_cred_guid2gid(&vap
->va_guuid
, &ngid
)) == 0)
1725 VATTR_RETURN(vap
, va_gid
, ngid
);
1730 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
1732 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
1733 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1734 nuid
= vp
->v_mount
->mnt_fsowner
;
1735 if (nuid
== KAUTH_UID_NONE
)
1737 } else if (VATTR_IS_SUPPORTED(vap
, va_uid
)) {
1740 /* this will always be something sensible */
1741 nuid
= vp
->v_mount
->mnt_fsowner
;
1743 if ((nuid
== 99) && !vfs_context_issuser(ctx
))
1744 nuid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
1745 VATTR_RETURN(vap
, va_uid
, nuid
);
1747 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
1748 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1749 ngid
= vp
->v_mount
->mnt_fsgroup
;
1750 if (ngid
== KAUTH_GID_NONE
)
1752 } else if (VATTR_IS_SUPPORTED(vap
, va_gid
)) {
1755 /* this will always be something sensible */
1756 ngid
= vp
->v_mount
->mnt_fsgroup
;
1758 if ((ngid
== 99) && !vfs_context_issuser(ctx
))
1759 ngid
= kauth_cred_getgid(vfs_context_ucred(ctx
));
1760 VATTR_RETURN(vap
, va_gid
, ngid
);
1764 * Synthesise some values that can be reasonably guessed.
1766 if (!VATTR_IS_SUPPORTED(vap
, va_iosize
))
1767 VATTR_RETURN(vap
, va_iosize
, vp
->v_mount
->mnt_vfsstat
.f_iosize
);
1769 if (!VATTR_IS_SUPPORTED(vap
, va_flags
))
1770 VATTR_RETURN(vap
, va_flags
, 0);
1772 if (!VATTR_IS_SUPPORTED(vap
, va_filerev
))
1773 VATTR_RETURN(vap
, va_filerev
, 0);
1775 if (!VATTR_IS_SUPPORTED(vap
, va_gen
))
1776 VATTR_RETURN(vap
, va_gen
, 0);
1779 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
1781 if (!VATTR_IS_SUPPORTED(vap
, va_data_size
))
1782 VATTR_RETURN(vap
, va_data_size
, 0);
1784 /* do we want any of the possibly-computed values? */
1785 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
1786 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
1787 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
1788 /* make sure f_bsize is valid */
1789 if (vp
->v_mount
->mnt_vfsstat
.f_bsize
== 0) {
1790 if ((error
= vfs_update_vfsstat(vp
->v_mount
, ctx
)) != 0)
1794 /* default va_data_alloc from va_data_size */
1795 if (!VATTR_IS_SUPPORTED(vap
, va_data_alloc
))
1796 VATTR_RETURN(vap
, va_data_alloc
, roundup(vap
->va_data_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
1798 /* default va_total_size from va_data_size */
1799 if (!VATTR_IS_SUPPORTED(vap
, va_total_size
))
1800 VATTR_RETURN(vap
, va_total_size
, vap
->va_data_size
);
1802 /* default va_total_alloc from va_total_size which is guaranteed at this point */
1803 if (!VATTR_IS_SUPPORTED(vap
, va_total_alloc
))
1804 VATTR_RETURN(vap
, va_total_alloc
, roundup(vap
->va_total_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
1808 * If we don't have a change time, pull it from the modtime.
1810 if (!VATTR_IS_SUPPORTED(vap
, va_change_time
) && VATTR_IS_SUPPORTED(vap
, va_modify_time
))
1811 VATTR_RETURN(vap
, va_change_time
, vap
->va_modify_time
);
1814 * This is really only supported for the creation VNOPs, but since the field is there
1815 * we should populate it correctly.
1817 VATTR_RETURN(vap
, va_type
, vp
->v_type
);
1820 * The fsid can be obtained from the mountpoint directly.
1822 VATTR_RETURN(vap
, va_fsid
, vp
->v_mount
->mnt_vfsstat
.f_fsid
.val
[0]);
1830 vnode_setattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1832 int error
, is_ownership_change
=0;
1835 * Make sure the filesystem is mounted R/W.
1836 * If not, return an error.
1838 if (vfs_isrdonly(vp
->v_mount
))
1842 * If ownership is being ignored on this volume, we silently discard
1843 * ownership changes.
1845 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1846 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1847 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1850 if (VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
)) {
1851 is_ownership_change
= 1;
1855 * Make sure that extended security is enabled if we're going to try
1858 if (!vfs_extendedsecurity(vnode_mount(vp
)) &&
1859 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
1860 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
1864 error
= VNOP_SETATTR(vp
, vap
, ctx
);
1866 if ((error
== 0) && !VATTR_ALL_SUPPORTED(vap
))
1867 error
= vnode_setattr_fallback(vp
, vap
, ctx
);
1870 * If we have changed any of the things about the file that are likely
1871 * to result in changes to authorisation results, blow the vnode auth
1874 if (VATTR_IS_SUPPORTED(vap
, va_mode
) ||
1875 VATTR_IS_SUPPORTED(vap
, va_uid
) ||
1876 VATTR_IS_SUPPORTED(vap
, va_gid
) ||
1877 VATTR_IS_SUPPORTED(vap
, va_flags
) ||
1878 VATTR_IS_SUPPORTED(vap
, va_acl
) ||
1879 VATTR_IS_SUPPORTED(vap
, va_uuuid
) ||
1880 VATTR_IS_SUPPORTED(vap
, va_guuid
))
1881 vnode_uncache_credentials(vp
);
1882 // only send a stat_changed event if this is more than
1883 // just an access time update
1884 if (error
== 0 && (vap
->va_active
!= VNODE_ATTR_BIT(va_access_time
))) {
1885 if (need_fsevent(FSE_STAT_CHANGED
, vp
) || (is_ownership_change
&& need_fsevent(FSE_CHOWN
, vp
))) {
1886 if (is_ownership_change
== 0)
1887 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
1889 add_fsevent(FSE_CHOWN
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
1896 * Following an operation which sets attributes (setattr, create, etc.) we may
1897 * need to perform fallback operations to get attributes saved.
1900 vnode_setattr_fallback(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1902 kauth_filesec_t fsec
;
1904 struct kauth_filesec lfsec
;
1910 * Extended security fallback via extended attributes.
1912 * Note that we do not free the filesec; the caller is expected to do this.
1914 if (VATTR_NOT_RETURNED(vap
, va_acl
) ||
1915 VATTR_NOT_RETURNED(vap
, va_uuuid
) ||
1916 VATTR_NOT_RETURNED(vap
, va_guuid
)) {
1917 VFS_DEBUG(ctx
, vp
, "SETATTR - doing filesec fallback");
1920 * Fail for file types that we don't permit extended security to be set on.
1922 if ((vp
->v_type
!= VDIR
) && (vp
->v_type
!= VLNK
) && (vp
->v_type
!= VREG
)) {
1923 VFS_DEBUG(ctx
, vp
, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp
));
1929 * If we don't have all the extended security items, we need to fetch the existing
1930 * data to perform a read-modify-write operation.
1933 if (!VATTR_IS_ACTIVE(vap
, va_acl
) ||
1934 !VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
1935 !VATTR_IS_ACTIVE(vap
, va_guuid
)) {
1936 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0) {
1937 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error
);
1941 /* if we didn't get a filesec, use our local one */
1943 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
1946 KAUTH_DEBUG("SETATTR - updating existing filesec");
1949 facl
= &fsec
->fsec_acl
;
1951 /* if we're using the local filesec, we need to initialise it */
1952 if (fsec
== &lfsec
) {
1953 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
1954 fsec
->fsec_owner
= kauth_null_guid
;
1955 fsec
->fsec_group
= kauth_null_guid
;
1956 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
1957 facl
->acl_flags
= 0;
1961 * Update with the supplied attributes.
1963 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
1964 KAUTH_DEBUG("SETATTR - updating owner UUID");
1965 fsec
->fsec_owner
= vap
->va_uuuid
;
1966 VATTR_SET_SUPPORTED(vap
, va_uuuid
);
1968 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
1969 KAUTH_DEBUG("SETATTR - updating group UUID");
1970 fsec
->fsec_group
= vap
->va_guuid
;
1971 VATTR_SET_SUPPORTED(vap
, va_guuid
);
1973 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
1974 if (vap
->va_acl
== NULL
) {
1975 KAUTH_DEBUG("SETATTR - removing ACL");
1976 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
1978 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap
->va_acl
->acl_entrycount
);
1981 VATTR_SET_SUPPORTED(vap
, va_acl
);
1985 * If the filesec data is all invalid, we can just remove the EA completely.
1987 if ((facl
->acl_entrycount
== KAUTH_FILESEC_NOACL
) &&
1988 kauth_guid_equal(&fsec
->fsec_owner
, &kauth_null_guid
) &&
1989 kauth_guid_equal(&fsec
->fsec_group
, &kauth_null_guid
)) {
1990 error
= vn_removexattr(vp
, KAUTH_FILESEC_XATTR
, XATTR_NOSECURITY
, ctx
);
1991 /* no attribute is ok, nothing to delete */
1992 if (error
== ENOATTR
)
1994 VFS_DEBUG(ctx
, vp
, "SETATTR - remove filesec returning %d", error
);
1997 error
= vnode_set_filesec(vp
, fsec
, facl
, ctx
);
1998 VFS_DEBUG(ctx
, vp
, "SETATTR - update filesec returning %d", error
);
2001 /* if we fetched a filesec, dispose of the buffer */
2003 kauth_filesec_free(fsec
);
2011 * Definition of vnode operations.
2017 *#% lookup dvp L ? ?
2018 *#% lookup vpp - L -
2020 struct vnop_lookup_args
{
2021 struct vnodeop_desc
*a_desc
;
2024 struct componentname
*a_cnp
;
2025 vfs_context_t a_context
;
2030 VNOP_LOOKUP(vnode_t dvp
, vnode_t
*vpp
, struct componentname
*cnp
, vfs_context_t context
)
2033 struct vnop_lookup_args a
;
2036 int funnel_state
= 0;
2038 a
.a_desc
= &vnop_lookup_desc
;
2042 a
.a_context
= context
;
2043 thread_safe
= THREAD_SAFE_FS(dvp
);
2045 vnode_cache_credentials(dvp
, context
);
2048 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2052 _err
= (*dvp
->v_op
[vnop_lookup_desc
.vdesc_offset
])(&a
);
2057 if ( (cnp
->cn_flags
& ISLASTCN
) ) {
2058 if ( (cnp
->cn_flags
& LOCKPARENT
) ) {
2059 if ( !(cnp
->cn_flags
& FSNODELOCKHELD
) ) {
2061 * leave the fsnode lock held on
2062 * the directory, but restore the funnel...
2063 * also indicate that we need to drop the
2064 * fsnode_lock when we're done with the
2065 * system call processing for this path
2067 cnp
->cn_flags
|= FSNODELOCKHELD
;
2069 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2074 unlock_fsnode(dvp
, &funnel_state
);
2082 *#% create dvp L L L
2083 *#% create vpp - L -
2087 struct vnop_create_args
{
2088 struct vnodeop_desc
*a_desc
;
2091 struct componentname
*a_cnp
;
2092 struct vnode_attr
*a_vap
;
2093 vfs_context_t a_context
;
2097 VNOP_CREATE(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t context
)
2100 struct vnop_create_args a
;
2102 int funnel_state
= 0;
2104 a
.a_desc
= &vnop_create_desc
;
2109 a
.a_context
= context
;
2110 thread_safe
= THREAD_SAFE_FS(dvp
);
2113 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2117 _err
= (*dvp
->v_op
[vnop_create_desc
.vdesc_offset
])(&a
);
2118 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
2120 * Remove stale Apple Double file (if any).
2122 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
2125 unlock_fsnode(dvp
, &funnel_state
);
2133 *#% whiteout dvp L L L
2134 *#% whiteout cnp - - -
2135 *#% whiteout flag - - -
2138 struct vnop_whiteout_args
{
2139 struct vnodeop_desc
*a_desc
;
2141 struct componentname
*a_cnp
;
2143 vfs_context_t a_context
;
2147 VNOP_WHITEOUT(vnode_t dvp
, struct componentname
* cnp
, int flags
, vfs_context_t context
)
2150 struct vnop_whiteout_args a
;
2152 int funnel_state
= 0;
2154 a
.a_desc
= &vnop_whiteout_desc
;
2158 a
.a_context
= context
;
2159 thread_safe
= THREAD_SAFE_FS(dvp
);
2162 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2166 _err
= (*dvp
->v_op
[vnop_whiteout_desc
.vdesc_offset
])(&a
);
2168 unlock_fsnode(dvp
, &funnel_state
);
2180 struct vnop_mknod_args
{
2181 struct vnodeop_desc
*a_desc
;
2184 struct componentname
*a_cnp
;
2185 struct vnode_attr
*a_vap
;
2186 vfs_context_t a_context
;
2190 VNOP_MKNOD(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t context
)
2194 struct vnop_mknod_args a
;
2196 int funnel_state
= 0;
2198 a
.a_desc
= &vnop_mknod_desc
;
2203 a
.a_context
= context
;
2204 thread_safe
= THREAD_SAFE_FS(dvp
);
2207 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2211 _err
= (*dvp
->v_op
[vnop_mknod_desc
.vdesc_offset
])(&a
);
2213 unlock_fsnode(dvp
, &funnel_state
);
2224 struct vnop_open_args
{
2225 struct vnodeop_desc
*a_desc
;
2228 vfs_context_t a_context
;
2232 VNOP_OPEN(vnode_t vp
, int mode
, vfs_context_t context
)
2235 struct vnop_open_args a
;
2237 int funnel_state
= 0;
2238 struct vfs_context acontext
;
2240 if (context
== NULL
) {
2241 acontext
.vc_proc
= current_proc();
2242 acontext
.vc_ucred
= kauth_cred_get();
2243 context
= &acontext
;
2245 a
.a_desc
= &vnop_open_desc
;
2248 a
.a_context
= context
;
2249 thread_safe
= THREAD_SAFE_FS(vp
);
2252 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2253 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2254 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2255 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2260 _err
= (*vp
->v_op
[vnop_open_desc
.vdesc_offset
])(&a
);
2262 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2263 unlock_fsnode(vp
, NULL
);
2265 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2276 struct vnop_close_args
{
2277 struct vnodeop_desc
*a_desc
;
2280 vfs_context_t a_context
;
2284 VNOP_CLOSE(vnode_t vp
, int fflag
, vfs_context_t context
)
2287 struct vnop_close_args a
;
2289 int funnel_state
= 0;
2290 struct vfs_context acontext
;
2292 if (context
== NULL
) {
2293 acontext
.vc_proc
= current_proc();
2294 acontext
.vc_ucred
= kauth_cred_get();
2295 context
= &acontext
;
2297 a
.a_desc
= &vnop_close_desc
;
2300 a
.a_context
= context
;
2301 thread_safe
= THREAD_SAFE_FS(vp
);
2304 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2305 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2306 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2307 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2312 _err
= (*vp
->v_op
[vnop_close_desc
.vdesc_offset
])(&a
);
2314 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2315 unlock_fsnode(vp
, NULL
);
2317 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2328 struct vnop_access_args
{
2329 struct vnodeop_desc
*a_desc
;
2332 vfs_context_t a_context
;
2336 VNOP_ACCESS(vnode_t vp
, int action
, vfs_context_t context
)
2339 struct vnop_access_args a
;
2341 int funnel_state
= 0;
2342 struct vfs_context acontext
;
2344 if (context
== NULL
) {
2345 acontext
.vc_proc
= current_proc();
2346 acontext
.vc_ucred
= kauth_cred_get();
2347 context
= &acontext
;
2349 a
.a_desc
= &vnop_access_desc
;
2351 a
.a_action
= action
;
2352 a
.a_context
= context
;
2353 thread_safe
= THREAD_SAFE_FS(vp
);
2356 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2360 _err
= (*vp
->v_op
[vnop_access_desc
.vdesc_offset
])(&a
);
2362 unlock_fsnode(vp
, &funnel_state
);
2370 *#% getattr vp = = =
2373 struct vnop_getattr_args
{
2374 struct vnodeop_desc
*a_desc
;
2376 struct vnode_attr
*a_vap
;
2377 vfs_context_t a_context
;
2381 VNOP_GETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t context
)
2384 struct vnop_getattr_args a
;
2388 a
.a_desc
= &vnop_getattr_desc
;
2391 a
.a_context
= context
;
2392 thread_safe
= THREAD_SAFE_FS(vp
);
2395 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2399 _err
= (*vp
->v_op
[vnop_getattr_desc
.vdesc_offset
])(&a
);
2401 unlock_fsnode(vp
, &funnel_state
);
2409 *#% setattr vp L L L
2412 struct vnop_setattr_args
{
2413 struct vnodeop_desc
*a_desc
;
2415 struct vnode_attr
*a_vap
;
2416 vfs_context_t a_context
;
2420 VNOP_SETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t context
)
2423 struct vnop_setattr_args a
;
2427 a
.a_desc
= &vnop_setattr_desc
;
2430 a
.a_context
= context
;
2431 thread_safe
= THREAD_SAFE_FS(vp
);
2434 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2438 _err
= (*vp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
2441 * Shadow uid/gid/mod change to extended attibute file.
2443 if (_err
== 0 && !NATIVE_XATTR(vp
)) {
2444 struct vnode_attr va
;
2448 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2449 VATTR_SET(&va
, va_uid
, vap
->va_uid
);
2452 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2453 VATTR_SET(&va
, va_gid
, vap
->va_gid
);
2456 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
2457 VATTR_SET(&va
, va_mode
, vap
->va_mode
);
2464 dvp
= vnode_getparent(vp
);
2465 vname
= vnode_getname(vp
);
2467 xattrfile_setattr(dvp
, vname
, &va
, context
, thread_safe
);
2471 vnode_putname(vname
);
2475 unlock_fsnode(vp
, &funnel_state
);
2483 *#% getattrlist vp = = =
2486 struct vnop_getattrlist_args
{
2487 struct vnodeop_desc
*a_desc
;
2489 struct attrlist
*a_alist
;
2492 vfs_context_t a_context
;
2496 VNOP_GETATTRLIST(vnode_t vp
, struct attrlist
* alist
, struct uio
* uio
, int options
, vfs_context_t context
)
2499 struct vnop_getattrlist_args a
;
2501 int funnel_state
= 0;
2503 a
.a_desc
= &vnop_getattrlist_desc
;
2507 a
.a_options
= options
;
2508 a
.a_context
= context
;
2509 thread_safe
= THREAD_SAFE_FS(vp
);
2512 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2516 _err
= (*vp
->v_op
[vnop_getattrlist_desc
.vdesc_offset
])(&a
);
2518 unlock_fsnode(vp
, &funnel_state
);
2526 *#% setattrlist vp L L L
2529 struct vnop_setattrlist_args
{
2530 struct vnodeop_desc
*a_desc
;
2532 struct attrlist
*a_alist
;
2535 vfs_context_t a_context
;
2539 VNOP_SETATTRLIST(vnode_t vp
, struct attrlist
* alist
, struct uio
* uio
, int options
, vfs_context_t context
)
2542 struct vnop_setattrlist_args a
;
2544 int funnel_state
= 0;
2546 a
.a_desc
= &vnop_setattrlist_desc
;
2550 a
.a_options
= options
;
2551 a
.a_context
= context
;
2552 thread_safe
= THREAD_SAFE_FS(vp
);
2555 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2559 _err
= (*vp
->v_op
[vnop_setattrlist_desc
.vdesc_offset
])(&a
);
2561 vnode_uncache_credentials(vp
);
2564 unlock_fsnode(vp
, &funnel_state
);
2576 struct vnop_read_args
{
2577 struct vnodeop_desc
*a_desc
;
2581 vfs_context_t a_context
;
2585 VNOP_READ(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t context
)
2588 struct vnop_read_args a
;
2590 int funnel_state
= 0;
2591 struct vfs_context acontext
;
2593 if (context
== NULL
) {
2594 acontext
.vc_proc
= current_proc();
2595 acontext
.vc_ucred
= kauth_cred_get();
2596 context
= &acontext
;
2599 a
.a_desc
= &vnop_read_desc
;
2602 a
.a_ioflag
= ioflag
;
2603 a
.a_context
= context
;
2604 thread_safe
= THREAD_SAFE_FS(vp
);
2607 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2608 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2609 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2610 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2615 _err
= (*vp
->v_op
[vnop_read_desc
.vdesc_offset
])(&a
);
2618 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2619 unlock_fsnode(vp
, NULL
);
2621 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2633 struct vnop_write_args
{
2634 struct vnodeop_desc
*a_desc
;
2638 vfs_context_t a_context
;
2642 VNOP_WRITE(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t context
)
2644 struct vnop_write_args a
;
2647 int funnel_state
= 0;
2648 struct vfs_context acontext
;
2650 if (context
== NULL
) {
2651 acontext
.vc_proc
= current_proc();
2652 acontext
.vc_ucred
= kauth_cred_get();
2653 context
= &acontext
;
2656 a
.a_desc
= &vnop_write_desc
;
2659 a
.a_ioflag
= ioflag
;
2660 a
.a_context
= context
;
2661 thread_safe
= THREAD_SAFE_FS(vp
);
2664 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2665 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2666 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2667 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2672 _err
= (*vp
->v_op
[vnop_write_desc
.vdesc_offset
])(&a
);
2675 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2676 unlock_fsnode(vp
, NULL
);
2678 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2690 struct vnop_ioctl_args
{
2691 struct vnodeop_desc
*a_desc
;
2696 vfs_context_t a_context
;
2700 VNOP_IOCTL(vnode_t vp
, u_long command
, caddr_t data
, int fflag
, vfs_context_t context
)
2703 struct vnop_ioctl_args a
;
2705 int funnel_state
= 0;
2706 struct vfs_context acontext
;
2708 if (context
== NULL
) {
2709 acontext
.vc_proc
= current_proc();
2710 acontext
.vc_ucred
= kauth_cred_get();
2711 context
= &acontext
;
2714 if (vfs_context_is64bit(context
)) {
2715 if (!vnode_vfs64bitready(vp
)) {
2720 a
.a_desc
= &vnop_ioctl_desc
;
2722 a
.a_command
= command
;
2725 a
.a_context
= context
;
2726 thread_safe
= THREAD_SAFE_FS(vp
);
2729 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2730 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2731 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2732 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2737 _err
= (*vp
->v_op
[vnop_ioctl_desc
.vdesc_offset
])(&a
);
2739 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2740 unlock_fsnode(vp
, NULL
);
2742 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2754 struct vnop_select_args
{
2755 struct vnodeop_desc
*a_desc
;
2760 vfs_context_t a_context
;
2764 VNOP_SELECT(vnode_t vp
, int which
, int fflags
, void * wql
, vfs_context_t context
)
2767 struct vnop_select_args a
;
2769 int funnel_state
= 0;
2770 struct vfs_context acontext
;
2772 if (context
== NULL
) {
2773 acontext
.vc_proc
= current_proc();
2774 acontext
.vc_ucred
= kauth_cred_get();
2775 context
= &acontext
;
2777 a
.a_desc
= &vnop_select_desc
;
2780 a
.a_fflags
= fflags
;
2781 a
.a_context
= context
;
2783 thread_safe
= THREAD_SAFE_FS(vp
);
2786 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2787 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2788 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2789 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2794 _err
= (*vp
->v_op
[vnop_select_desc
.vdesc_offset
])(&a
);
2796 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2797 unlock_fsnode(vp
, NULL
);
2799 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2808 *#% exchange fvp L L L
2809 *#% exchange tvp L L L
2812 struct vnop_exchange_args
{
2813 struct vnodeop_desc
*a_desc
;
2817 vfs_context_t a_context
;
2821 VNOP_EXCHANGE(vnode_t fvp
, vnode_t tvp
, int options
, vfs_context_t context
)
2824 struct vnop_exchange_args a
;
2826 int funnel_state
= 0;
2827 vnode_t lock_first
= NULL
, lock_second
= NULL
;
2829 a
.a_desc
= &vnop_exchange_desc
;
2832 a
.a_options
= options
;
2833 a
.a_context
= context
;
2834 thread_safe
= THREAD_SAFE_FS(fvp
);
2838 * Lock in vnode address order to avoid deadlocks
2847 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) ) {
2850 if ( (_err
= lock_fsnode(lock_second
, NULL
)) ) {
2851 unlock_fsnode(lock_first
, &funnel_state
);
2855 _err
= (*fvp
->v_op
[vnop_exchange_desc
.vdesc_offset
])(&a
);
2857 unlock_fsnode(lock_second
, NULL
);
2858 unlock_fsnode(lock_first
, &funnel_state
);
2870 struct vnop_revoke_args
{
2871 struct vnodeop_desc
*a_desc
;
2874 vfs_context_t a_context
;
2878 VNOP_REVOKE(vnode_t vp
, int flags
, vfs_context_t context
)
2880 struct vnop_revoke_args a
;
2883 int funnel_state
= 0;
2885 a
.a_desc
= &vnop_revoke_desc
;
2888 a
.a_context
= context
;
2889 thread_safe
= THREAD_SAFE_FS(vp
);
2892 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2894 _err
= (*vp
->v_op
[vnop_revoke_desc
.vdesc_offset
])(&a
);
2896 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2908 struct vnop_mmap_args
{
2909 struct vnodeop_desc
*a_desc
;
2912 vfs_context_t a_context
;
2916 VNOP_MMAP(vnode_t vp
, int fflags
, vfs_context_t context
)
2919 struct vnop_mmap_args a
;
2921 int funnel_state
= 0;
2923 a
.a_desc
= &vnop_mmap_desc
;
2925 a
.a_fflags
= fflags
;
2926 a
.a_context
= context
;
2927 thread_safe
= THREAD_SAFE_FS(vp
);
2930 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2934 _err
= (*vp
->v_op
[vnop_mmap_desc
.vdesc_offset
])(&a
);
2936 unlock_fsnode(vp
, &funnel_state
);
2945 *# mnomap - vp U U U
2948 struct vnop_mnomap_args
{
2949 struct vnodeop_desc
*a_desc
;
2951 vfs_context_t a_context
;
2955 VNOP_MNOMAP(vnode_t vp
, vfs_context_t context
)
2958 struct vnop_mnomap_args a
;
2960 int funnel_state
= 0;
2962 a
.a_desc
= &vnop_mnomap_desc
;
2964 a
.a_context
= context
;
2965 thread_safe
= THREAD_SAFE_FS(vp
);
2968 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2972 _err
= (*vp
->v_op
[vnop_mnomap_desc
.vdesc_offset
])(&a
);
2974 unlock_fsnode(vp
, &funnel_state
);
2986 struct vnop_fsync_args
{
2987 struct vnodeop_desc
*a_desc
;
2990 vfs_context_t a_context
;
2994 VNOP_FSYNC(vnode_t vp
, int waitfor
, vfs_context_t context
)
2996 struct vnop_fsync_args a
;
2999 int funnel_state
= 0;
3001 a
.a_desc
= &vnop_fsync_desc
;
3003 a
.a_waitfor
= waitfor
;
3004 a
.a_context
= context
;
3005 thread_safe
= THREAD_SAFE_FS(vp
);
3008 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3012 _err
= (*vp
->v_op
[vnop_fsync_desc
.vdesc_offset
])(&a
);
3014 unlock_fsnode(vp
, &funnel_state
);
3023 *#% remove dvp L U U
3027 struct vnop_remove_args
{
3028 struct vnodeop_desc
*a_desc
;
3031 struct componentname
*a_cnp
;
3033 vfs_context_t a_context
;
3037 VNOP_REMOVE(vnode_t dvp
, vnode_t vp
, struct componentname
* cnp
, int flags
, vfs_context_t context
)
3040 struct vnop_remove_args a
;
3042 int funnel_state
= 0;
3044 a
.a_desc
= &vnop_remove_desc
;
3049 a
.a_context
= context
;
3050 thread_safe
= THREAD_SAFE_FS(dvp
);
3053 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3057 _err
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3060 vnode_setneedinactive(vp
);
3062 if ( !(NATIVE_XATTR(dvp
)) ) {
3064 * Remove any associated extended attibute file (._ AppleDouble file).
3066 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 1);
3070 unlock_fsnode(vp
, &funnel_state
);
3083 struct vnop_link_args
{
3084 struct vnodeop_desc
*a_desc
;
3087 struct componentname
*a_cnp
;
3088 vfs_context_t a_context
;
3092 VNOP_LINK(vnode_t vp
, vnode_t tdvp
, struct componentname
* cnp
, vfs_context_t context
)
3095 struct vnop_link_args a
;
3097 int funnel_state
= 0;
3100 * For file systems with non-native extended attributes,
3101 * disallow linking to an existing "._" Apple Double file.
3103 if ( !NATIVE_XATTR(tdvp
) && (vp
->v_type
== VREG
)) {
3106 vname
= vnode_getname(vp
);
3107 if (vname
!= NULL
) {
3109 if (vname
[0] == '.' && vname
[1] == '_' && vname
[2] != '\0') {
3112 vnode_putname(vname
);
3117 a
.a_desc
= &vnop_link_desc
;
3121 a
.a_context
= context
;
3122 thread_safe
= THREAD_SAFE_FS(vp
);
3125 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3129 _err
= (*tdvp
->v_op
[vnop_link_desc
.vdesc_offset
])(&a
);
3131 unlock_fsnode(vp
, &funnel_state
);
3140 *#% rename fdvp U U U
3141 *#% rename fvp U U U
3142 *#% rename tdvp L U U
3143 *#% rename tvp X U U
3146 struct vnop_rename_args
{
3147 struct vnodeop_desc
*a_desc
;
3150 struct componentname
*a_fcnp
;
3153 struct componentname
*a_tcnp
;
3154 vfs_context_t a_context
;
3158 VNOP_RENAME(struct vnode
*fdvp
, struct vnode
*fvp
, struct componentname
*fcnp
,
3159 struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
3160 vfs_context_t context
)
3163 struct vnop_rename_args a
;
3164 int funnel_state
= 0;
3165 char smallname1
[48];
3166 char smallname2
[48];
3167 char *xfromname
= NULL
;
3168 char *xtoname
= NULL
;
3169 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3170 vnode_t fdvp_unsafe
= NULLVP
;
3171 vnode_t tdvp_unsafe
= NULLVP
;
3173 a
.a_desc
= &vnop_rename_desc
;
3180 a
.a_context
= context
;
3182 if (!THREAD_SAFE_FS(fdvp
))
3184 if (!THREAD_SAFE_FS(tdvp
))
3187 if (fdvp_unsafe
!= NULLVP
) {
3189 * Lock parents in vnode address order to avoid deadlocks
3190 * note that it's possible for the fdvp to be unsafe,
3191 * but the tdvp to be safe because tvp could be a directory
3192 * in the root of a filesystem... in that case, tdvp is the
3193 * in the filesystem that this root is mounted on
3195 if (tdvp_unsafe
== NULL
|| fdvp_unsafe
== tdvp_unsafe
) {
3196 lock_first
= fdvp_unsafe
;
3198 } else if (fdvp_unsafe
< tdvp_unsafe
) {
3199 lock_first
= fdvp_unsafe
;
3200 lock_second
= tdvp_unsafe
;
3202 lock_first
= tdvp_unsafe
;
3203 lock_second
= fdvp_unsafe
;
3205 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) )
3208 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3209 unlock_fsnode(lock_first
, &funnel_state
);
3214 * Lock both children in vnode address order to avoid deadlocks
3216 if (tvp
== NULL
|| tvp
== fvp
) {
3219 } else if (fvp
< tvp
) {
3226 if ( (_err
= lock_fsnode(lock_first
, NULL
)) )
3229 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3230 unlock_fsnode(lock_first
, NULL
);
3235 * Save source and destination names (._ AppleDouble files).
3236 * Skip if source already has a "._" prefix.
3238 if (!NATIVE_XATTR(fdvp
) &&
3239 !(fcnp
->cn_nameptr
[0] == '.' && fcnp
->cn_nameptr
[1] == '_')) {
3242 /* Get source attribute file name. */
3243 len
= fcnp
->cn_namelen
+ 3;
3244 if (len
> sizeof(smallname1
)) {
3245 MALLOC(xfromname
, char *, len
, M_TEMP
, M_WAITOK
);
3247 xfromname
= &smallname1
[0];
3249 strcpy(xfromname
, "._");
3250 strncat(xfromname
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
);
3251 xfromname
[len
-1] = '\0';
3253 /* Get destination attribute file name. */
3254 len
= tcnp
->cn_namelen
+ 3;
3255 if (len
> sizeof(smallname2
)) {
3256 MALLOC(xtoname
, char *, len
, M_TEMP
, M_WAITOK
);
3258 xtoname
= &smallname2
[0];
3260 strcpy(xtoname
, "._");
3261 strncat(xtoname
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
);
3262 xtoname
[len
-1] = '\0';
3265 _err
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3267 if (fdvp_unsafe
!= NULLVP
) {
3268 if (lock_second
!= NULL
)
3269 unlock_fsnode(lock_second
, NULL
);
3270 unlock_fsnode(lock_first
, NULL
);
3273 if (tvp
&& tvp
!= fvp
)
3274 vnode_setneedinactive(tvp
);
3278 * Rename any associated extended attibute file (._ AppleDouble file).
3280 if (_err
== 0 && !NATIVE_XATTR(fdvp
) && xfromname
!= NULL
) {
3281 struct nameidata fromnd
, tond
;
3286 * Get source attribute file vnode.
3287 * Note that fdvp already has an iocount reference and
3288 * using DELETE will take an additional reference.
3290 NDINIT(&fromnd
, DELETE
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3291 CAST_USER_ADDR_T(xfromname
), context
);
3292 fromnd
.ni_dvp
= fdvp
;
3293 error
= namei(&fromnd
);
3296 /* When source doesn't exist there still may be a destination. */
3297 if (error
== ENOENT
) {
3302 } else if (fromnd
.ni_vp
->v_type
!= VREG
) {
3303 vnode_put(fromnd
.ni_vp
);
3308 struct vnop_remove_args args
;
3311 * Get destination attribute file vnode.
3312 * Note that tdvp already has an iocount reference.
3314 NDINIT(&tond
, DELETE
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3315 CAST_USER_ADDR_T(xtoname
), context
);
3317 error
= namei(&tond
);
3321 if (tond
.ni_vp
->v_type
!= VREG
) {
3322 vnode_put(tond
.ni_vp
);
3326 args
.a_desc
= &vnop_remove_desc
;
3328 args
.a_vp
= tond
.ni_vp
;
3329 args
.a_cnp
= &tond
.ni_cnd
;
3330 args
.a_context
= context
;
3332 if (fdvp_unsafe
!= NULLVP
)
3333 error
= lock_fsnode(tond
.ni_vp
, NULL
);
3335 error
= (*tdvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&args
);
3337 if (fdvp_unsafe
!= NULLVP
)
3338 unlock_fsnode(tond
.ni_vp
, NULL
);
3341 vnode_setneedinactive(tond
.ni_vp
);
3343 vnode_put(tond
.ni_vp
);
3349 * Get destination attribute file vnode.
3351 NDINIT(&tond
, RENAME
,
3352 NOCACHE
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3353 CAST_USER_ADDR_T(xtoname
), context
);
3355 error
= namei(&tond
);
3358 vnode_put(fromnd
.ni_vp
);
3362 a
.a_desc
= &vnop_rename_desc
;
3364 a
.a_fvp
= fromnd
.ni_vp
;
3365 a
.a_fcnp
= &fromnd
.ni_cnd
;
3367 a
.a_tvp
= tond
.ni_vp
;
3368 a
.a_tcnp
= &tond
.ni_cnd
;
3369 a
.a_context
= context
;
3371 if (fdvp_unsafe
!= NULLVP
) {
3373 * Lock in vnode address order to avoid deadlocks
3375 if (tond
.ni_vp
== NULL
|| tond
.ni_vp
== fromnd
.ni_vp
) {
3376 lock_first
= fromnd
.ni_vp
;
3378 } else if (fromnd
.ni_vp
< tond
.ni_vp
) {
3379 lock_first
= fromnd
.ni_vp
;
3380 lock_second
= tond
.ni_vp
;
3382 lock_first
= tond
.ni_vp
;
3383 lock_second
= fromnd
.ni_vp
;
3385 if ( (error
= lock_fsnode(lock_first
, NULL
)) == 0) {
3386 if (lock_second
!= NULL
&& (error
= lock_fsnode(lock_second
, NULL
)) )
3387 unlock_fsnode(lock_first
, NULL
);
3391 error
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3393 if (fdvp_unsafe
!= NULLVP
) {
3394 if (lock_second
!= NULL
)
3395 unlock_fsnode(lock_second
, NULL
);
3396 unlock_fsnode(lock_first
, NULL
);
3399 vnode_setneedinactive(fromnd
.ni_vp
);
3401 if (tond
.ni_vp
&& tond
.ni_vp
!= fromnd
.ni_vp
)
3402 vnode_setneedinactive(tond
.ni_vp
);
3405 vnode_put(fromnd
.ni_vp
);
3407 vnode_put(tond
.ni_vp
);
3413 if (xfromname
&& xfromname
!= &smallname1
[0]) {
3414 FREE(xfromname
, M_TEMP
);
3416 if (xtoname
&& xtoname
!= &smallname2
[0]) {
3417 FREE(xtoname
, M_TEMP
);
3420 if (fdvp_unsafe
!= NULLVP
) {
3421 if (tdvp_unsafe
!= NULLVP
)
3422 unlock_fsnode(tdvp_unsafe
, NULL
);
3423 unlock_fsnode(fdvp_unsafe
, &funnel_state
);
3435 struct vnop_mkdir_args
{
3436 struct vnodeop_desc
*a_desc
;
3439 struct componentname
*a_cnp
;
3440 struct vnode_attr
*a_vap
;
3441 vfs_context_t a_context
;
3445 VNOP_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3446 struct vnode_attr
*vap
, vfs_context_t context
)
3449 struct vnop_mkdir_args a
;
3451 int funnel_state
= 0;
3453 a
.a_desc
= &vnop_mkdir_desc
;
3458 a
.a_context
= context
;
3459 thread_safe
= THREAD_SAFE_FS(dvp
);
3462 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3466 _err
= (*dvp
->v_op
[vnop_mkdir_desc
.vdesc_offset
])(&a
);
3467 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3469 * Remove stale Apple Double file (if any).
3471 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
3474 unlock_fsnode(dvp
, &funnel_state
);
3487 struct vnop_rmdir_args
{
3488 struct vnodeop_desc
*a_desc
;
3491 struct componentname
*a_cnp
;
3492 vfs_context_t a_context
;
3497 VNOP_RMDIR(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
, vfs_context_t context
)
3500 struct vnop_rmdir_args a
;
3502 int funnel_state
= 0;
3504 a
.a_desc
= &vnop_rmdir_desc
;
3508 a
.a_context
= context
;
3509 thread_safe
= THREAD_SAFE_FS(dvp
);
3512 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3516 _err
= (*vp
->v_op
[vnop_rmdir_desc
.vdesc_offset
])(&a
);
3519 vnode_setneedinactive(vp
);
3521 if ( !(NATIVE_XATTR(dvp
)) ) {
3523 * Remove any associated extended attibute file (._ AppleDouble file).
3525 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 1);
3529 unlock_fsnode(vp
, &funnel_state
);
3535 * Remove a ._ AppleDouble file
3537 #define AD_STALE_SECS (180)
3539 xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t context
, int thread_safe
, int force
) {
3541 struct nameidata nd
;
3543 char *filename
= NULL
;
3546 if ((basename
== NULL
) || (basename
[0] == '\0') ||
3547 (basename
[0] == '.' && basename
[1] == '_')) {
3550 filename
= &smallname
[0];
3551 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
3552 if (len
>= sizeof(smallname
)) {
3553 len
++; /* snprintf result doesn't include '\0' */
3554 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
3555 len
= snprintf(filename
, len
, "._%s", basename
);
3557 NDINIT(&nd
, DELETE
, LOCKLEAF
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3558 CAST_USER_ADDR_T(filename
), context
);
3560 if (namei(&nd
) != 0)
3565 if (xvp
->v_type
!= VREG
)
3569 * When creating a new object and a "._" file already
3570 * exists, check to see if its a stale "._" file.
3574 struct vnode_attr va
;
3577 VATTR_WANTED(&va
, va_data_size
);
3578 VATTR_WANTED(&va
, va_modify_time
);
3579 if (VNOP_GETATTR(xvp
, &va
, context
) == 0 &&
3580 VATTR_IS_SUPPORTED(&va
, va_data_size
) &&
3581 VATTR_IS_SUPPORTED(&va
, va_modify_time
) &&
3582 va
.va_data_size
!= 0) {
3586 if ((tv
.tv_sec
> va
.va_modify_time
.tv_sec
) &&
3587 (tv
.tv_sec
- va
.va_modify_time
.tv_sec
) > AD_STALE_SECS
) {
3588 force
= 1; /* must be stale */
3593 struct vnop_remove_args a
;
3596 a
.a_desc
= &vnop_remove_desc
;
3597 a
.a_dvp
= nd
.ni_dvp
;
3599 a
.a_cnp
= &nd
.ni_cnd
;
3600 a
.a_context
= context
;
3603 if ( (lock_fsnode(xvp
, NULL
)) )
3606 error
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3609 unlock_fsnode(xvp
, NULL
);
3612 vnode_setneedinactive(xvp
);
3615 /* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
3618 if (filename
&& filename
!= &smallname
[0]) {
3619 FREE(filename
, M_TEMP
);
3624 * Shadow uid/gid/mod to a ._ AppleDouble file
3627 xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
3628 vfs_context_t context
, int thread_safe
) {
3630 struct nameidata nd
;
3632 char *filename
= NULL
;
3635 if ((dvp
== NULLVP
) ||
3636 (basename
== NULL
) || (basename
[0] == '\0') ||
3637 (basename
[0] == '.' && basename
[1] == '_')) {
3640 filename
= &smallname
[0];
3641 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
3642 if (len
>= sizeof(smallname
)) {
3643 len
++; /* snprintf result doesn't include '\0' */
3644 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
3645 len
= snprintf(filename
, len
, "._%s", basename
);
3647 NDINIT(&nd
, LOOKUP
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3648 CAST_USER_ADDR_T(filename
), context
);
3650 if (namei(&nd
) != 0)
3656 if (xvp
->v_type
== VREG
) {
3657 struct vnop_setattr_args a
;
3659 a
.a_desc
= &vnop_setattr_desc
;
3662 a
.a_context
= context
;
3665 if ( (lock_fsnode(xvp
, NULL
)) )
3668 (void) (*xvp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
3670 unlock_fsnode(xvp
, NULL
);
3676 if (filename
&& filename
!= &smallname
[0]) {
3677 FREE(filename
, M_TEMP
);
3684 *#% symlink dvp L U U
3685 *#% symlink vpp - U -
3688 struct vnop_symlink_args
{
3689 struct vnodeop_desc
*a_desc
;
3692 struct componentname
*a_cnp
;
3693 struct vnode_attr
*a_vap
;
3695 vfs_context_t a_context
;
3700 VNOP_SYMLINK(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3701 struct vnode_attr
*vap
, char *target
, vfs_context_t context
)
3704 struct vnop_symlink_args a
;
3706 int funnel_state
= 0;
3708 a
.a_desc
= &vnop_symlink_desc
;
3713 a
.a_target
= target
;
3714 a
.a_context
= context
;
3715 thread_safe
= THREAD_SAFE_FS(dvp
);
3718 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3722 _err
= (*dvp
->v_op
[vnop_symlink_desc
.vdesc_offset
])(&a
);
3723 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3725 * Remove stale Apple Double file (if any).
3727 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
3730 unlock_fsnode(dvp
, &funnel_state
);
3738 *#% readdir vp L L L
3741 struct vnop_readdir_args
{
3742 struct vnodeop_desc
*a_desc
;
3748 vfs_context_t a_context
;
3753 VNOP_READDIR(struct vnode
*vp
, struct uio
*uio
, int flags
, int *eofflag
,
3754 int *numdirent
, vfs_context_t context
)
3757 struct vnop_readdir_args a
;
3759 int funnel_state
= 0;
3761 a
.a_desc
= &vnop_readdir_desc
;
3765 a
.a_eofflag
= eofflag
;
3766 a
.a_numdirent
= numdirent
;
3767 a
.a_context
= context
;
3768 thread_safe
= THREAD_SAFE_FS(vp
);
3771 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3775 _err
= (*vp
->v_op
[vnop_readdir_desc
.vdesc_offset
])(&a
);
3777 unlock_fsnode(vp
, &funnel_state
);
3785 *#% readdirattr vp L L L
3788 struct vnop_readdirattr_args
{
3789 struct vnodeop_desc
*a_desc
;
3791 struct attrlist
*a_alist
;
3797 u_long
*a_actualcount
;
3798 vfs_context_t a_context
;
3803 VNOP_READDIRATTR(struct vnode
*vp
, struct attrlist
*alist
, struct uio
*uio
, u_long maxcount
,
3804 u_long options
, u_long
*newstate
, int *eofflag
, u_long
*actualcount
, vfs_context_t context
)
3807 struct vnop_readdirattr_args a
;
3809 int funnel_state
= 0;
3811 a
.a_desc
= &vnop_readdirattr_desc
;
3815 a
.a_maxcount
= maxcount
;
3816 a
.a_options
= options
;
3817 a
.a_newstate
= newstate
;
3818 a
.a_eofflag
= eofflag
;
3819 a
.a_actualcount
= actualcount
;
3820 a
.a_context
= context
;
3821 thread_safe
= THREAD_SAFE_FS(vp
);
3824 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3828 _err
= (*vp
->v_op
[vnop_readdirattr_desc
.vdesc_offset
])(&a
);
3830 unlock_fsnode(vp
, &funnel_state
);
3838 *#% readlink vp L L L
3841 struct vnop_readlink_args
{
3842 struct vnodeop_desc
*a_desc
;
3845 vfs_context_t a_context
;
3850 VNOP_READLINK(struct vnode
*vp
, struct uio
*uio
, vfs_context_t context
)
3853 struct vnop_readlink_args a
;
3855 int funnel_state
= 0;
3857 a
.a_desc
= &vnop_readlink_desc
;
3860 a
.a_context
= context
;
3861 thread_safe
= THREAD_SAFE_FS(vp
);
3864 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3868 _err
= (*vp
->v_op
[vnop_readlink_desc
.vdesc_offset
])(&a
);
3870 unlock_fsnode(vp
, &funnel_state
);
3878 *#% inactive vp L U U
3881 struct vnop_inactive_args
{
3882 struct vnodeop_desc
*a_desc
;
3884 vfs_context_t a_context
;
3888 VNOP_INACTIVE(struct vnode
*vp
, vfs_context_t context
)
3891 struct vnop_inactive_args a
;
3893 int funnel_state
= 0;
3895 a
.a_desc
= &vnop_inactive_desc
;
3897 a
.a_context
= context
;
3898 thread_safe
= THREAD_SAFE_FS(vp
);
3901 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3905 _err
= (*vp
->v_op
[vnop_inactive_desc
.vdesc_offset
])(&a
);
3907 unlock_fsnode(vp
, &funnel_state
);
3916 *#% reclaim vp U U U
3919 struct vnop_reclaim_args
{
3920 struct vnodeop_desc
*a_desc
;
3922 vfs_context_t a_context
;
3926 VNOP_RECLAIM(struct vnode
*vp
, vfs_context_t context
)
3929 struct vnop_reclaim_args a
;
3931 int funnel_state
= 0;
3933 a
.a_desc
= &vnop_reclaim_desc
;
3935 a
.a_context
= context
;
3936 thread_safe
= THREAD_SAFE_FS(vp
);
3939 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3941 _err
= (*vp
->v_op
[vnop_reclaim_desc
.vdesc_offset
])(&a
);
3943 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3952 *#% pathconf vp L L L
3955 struct vnop_pathconf_args
{
3956 struct vnodeop_desc
*a_desc
;
3959 register_t
*a_retval
;
3960 vfs_context_t a_context
;
3964 VNOP_PATHCONF(struct vnode
*vp
, int name
, register_t
*retval
, vfs_context_t context
)
3967 struct vnop_pathconf_args a
;
3969 int funnel_state
= 0;
3971 a
.a_desc
= &vnop_pathconf_desc
;
3974 a
.a_retval
= retval
;
3975 a
.a_context
= context
;
3976 thread_safe
= THREAD_SAFE_FS(vp
);
3979 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3983 _err
= (*vp
->v_op
[vnop_pathconf_desc
.vdesc_offset
])(&a
);
3985 unlock_fsnode(vp
, &funnel_state
);
3993 *#% advlock vp U U U
3996 struct vnop_advlock_args
{
3997 struct vnodeop_desc
*a_desc
;
4003 vfs_context_t a_context
;
4007 VNOP_ADVLOCK(struct vnode
*vp
, caddr_t id
, int op
, struct flock
*fl
, int flags
, vfs_context_t context
)
4010 struct vnop_advlock_args a
;
4012 int funnel_state
= 0;
4013 struct uthread
* uth
;
4015 a
.a_desc
= &vnop_advlock_desc
;
4021 a
.a_context
= context
;
4022 thread_safe
= THREAD_SAFE_FS(vp
);
4024 uth
= get_bsdthread_info(current_thread());
4026 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4028 /* Disallow advisory locking on non-seekable vnodes */
4029 if (vnode_isfifo(vp
)) {
4030 _err
= err_advlock(&a
);
4032 if ((vp
->v_flag
& VLOCKLOCAL
)) {
4033 /* Advisory locking done at this layer */
4034 _err
= lf_advlock(&a
);
4036 /* Advisory locking done by underlying filesystem */
4037 _err
= (*vp
->v_op
[vnop_advlock_desc
.vdesc_offset
])(&a
);
4041 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4051 *#% allocate vp L L L
4054 struct vnop_allocate_args
{
4055 struct vnodeop_desc
*a_desc
;
4059 off_t
*a_bytesallocated
;
4061 vfs_context_t a_context
;
4066 VNOP_ALLOCATE(struct vnode
*vp
, off_t length
, u_int32_t flags
, off_t
*bytesallocated
, off_t offset
, vfs_context_t context
)
4069 struct vnop_allocate_args a
;
4071 int funnel_state
= 0;
4073 a
.a_desc
= &vnop_allocate_desc
;
4075 a
.a_length
= length
;
4077 a
.a_bytesallocated
= bytesallocated
;
4078 a
.a_offset
= offset
;
4079 a
.a_context
= context
;
4080 thread_safe
= THREAD_SAFE_FS(vp
);
4083 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4087 _err
= (*vp
->v_op
[vnop_allocate_desc
.vdesc_offset
])(&a
);
4089 unlock_fsnode(vp
, &funnel_state
);
4100 struct vnop_pagein_args
{
4101 struct vnodeop_desc
*a_desc
;
4104 vm_offset_t a_pl_offset
;
4108 vfs_context_t a_context
;
4112 VNOP_PAGEIN(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t context
)
4115 struct vnop_pagein_args a
;
4117 int funnel_state
= 0;
4119 a
.a_desc
= &vnop_pagein_desc
;
4122 a
.a_pl_offset
= pl_offset
;
4123 a
.a_f_offset
= f_offset
;
4126 a
.a_context
= context
;
4127 thread_safe
= THREAD_SAFE_FS(vp
);
4130 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4132 _err
= (*vp
->v_op
[vnop_pagein_desc
.vdesc_offset
])(&a
);
4134 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4142 *#% pageout vp = = =
4145 struct vnop_pageout_args
{
4146 struct vnodeop_desc
*a_desc
;
4149 vm_offset_t a_pl_offset
;
4153 vfs_context_t a_context
;
4158 VNOP_PAGEOUT(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t context
)
4161 struct vnop_pageout_args a
;
4163 int funnel_state
= 0;
4165 a
.a_desc
= &vnop_pageout_desc
;
4168 a
.a_pl_offset
= pl_offset
;
4169 a
.a_f_offset
= f_offset
;
4172 a
.a_context
= context
;
4173 thread_safe
= THREAD_SAFE_FS(vp
);
4176 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4178 _err
= (*vp
->v_op
[vnop_pageout_desc
.vdesc_offset
])(&a
);
4180 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4189 *#% searchfs vp L L L
4192 struct vnop_searchfs_args
{
4193 struct vnodeop_desc
*a_desc
;
4195 void *a_searchparams1
;
4196 void *a_searchparams2
;
4197 struct attrlist
*a_searchattrs
;
4198 u_long a_maxmatches
;
4199 struct timeval
*a_timelimit
;
4200 struct attrlist
*a_returnattrs
;
4201 u_long
*a_nummatches
;
4202 u_long a_scriptcode
;
4205 struct searchstate
*a_searchstate
;
4206 vfs_context_t a_context
;
4211 VNOP_SEARCHFS(struct vnode
*vp
, void *searchparams1
, void *searchparams2
, struct attrlist
*searchattrs
, u_long maxmatches
, struct timeval
*timelimit
, struct attrlist
*returnattrs
, u_long
*nummatches
, u_long scriptcode
, u_long options
, struct uio
*uio
, struct searchstate
*searchstate
, vfs_context_t context
)
4214 struct vnop_searchfs_args a
;
4216 int funnel_state
= 0;
4218 a
.a_desc
= &vnop_searchfs_desc
;
4220 a
.a_searchparams1
= searchparams1
;
4221 a
.a_searchparams2
= searchparams2
;
4222 a
.a_searchattrs
= searchattrs
;
4223 a
.a_maxmatches
= maxmatches
;
4224 a
.a_timelimit
= timelimit
;
4225 a
.a_returnattrs
= returnattrs
;
4226 a
.a_nummatches
= nummatches
;
4227 a
.a_scriptcode
= scriptcode
;
4228 a
.a_options
= options
;
4230 a
.a_searchstate
= searchstate
;
4231 a
.a_context
= context
;
4232 thread_safe
= THREAD_SAFE_FS(vp
);
4235 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4239 _err
= (*vp
->v_op
[vnop_searchfs_desc
.vdesc_offset
])(&a
);
4241 unlock_fsnode(vp
, &funnel_state
);
4249 *#% copyfile fvp U U U
4250 *#% copyfile tdvp L U U
4251 *#% copyfile tvp X U U
4254 struct vnop_copyfile_args
{
4255 struct vnodeop_desc
*a_desc
;
4259 struct componentname
*a_tcnp
;
4262 vfs_context_t a_context
;
4266 VNOP_COPYFILE(struct vnode
*fvp
, struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
4267 int mode
, int flags
, vfs_context_t context
)
4270 struct vnop_copyfile_args a
;
4271 a
.a_desc
= &vnop_copyfile_desc
;
4278 a
.a_context
= context
;
4279 _err
= (*fvp
->v_op
[vnop_copyfile_desc
.vdesc_offset
])(&a
);
4285 VNOP_GETXATTR(vnode_t vp
, const char *name
, uio_t uio
, size_t *size
, int options
, vfs_context_t context
)
4287 struct vnop_getxattr_args a
;
4290 int funnel_state
= 0;
4292 a
.a_desc
= &vnop_getxattr_desc
;
4297 a
.a_options
= options
;
4298 a
.a_context
= context
;
4300 thread_safe
= THREAD_SAFE_FS(vp
);
4302 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4306 error
= (*vp
->v_op
[vnop_getxattr_desc
.vdesc_offset
])(&a
);
4308 unlock_fsnode(vp
, &funnel_state
);
4314 VNOP_SETXATTR(vnode_t vp
, const char *name
, uio_t uio
, int options
, vfs_context_t context
)
4316 struct vnop_setxattr_args a
;
4319 int funnel_state
= 0;
4321 a
.a_desc
= &vnop_setxattr_desc
;
4325 a
.a_options
= options
;
4326 a
.a_context
= context
;
4328 thread_safe
= THREAD_SAFE_FS(vp
);
4330 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4334 error
= (*vp
->v_op
[vnop_setxattr_desc
.vdesc_offset
])(&a
);
4336 unlock_fsnode(vp
, &funnel_state
);
4342 VNOP_REMOVEXATTR(vnode_t vp
, const char *name
, int options
, vfs_context_t context
)
4344 struct vnop_removexattr_args a
;
4347 int funnel_state
= 0;
4349 a
.a_desc
= &vnop_removexattr_desc
;
4352 a
.a_options
= options
;
4353 a
.a_context
= context
;
4355 thread_safe
= THREAD_SAFE_FS(vp
);
4357 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4361 error
= (*vp
->v_op
[vnop_removexattr_desc
.vdesc_offset
])(&a
);
4363 unlock_fsnode(vp
, &funnel_state
);
4369 VNOP_LISTXATTR(vnode_t vp
, uio_t uio
, size_t *size
, int options
, vfs_context_t context
)
4371 struct vnop_listxattr_args a
;
4374 int funnel_state
= 0;
4376 a
.a_desc
= &vnop_listxattr_desc
;
4380 a
.a_options
= options
;
4381 a
.a_context
= context
;
4383 thread_safe
= THREAD_SAFE_FS(vp
);
4385 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4389 error
= (*vp
->v_op
[vnop_listxattr_desc
.vdesc_offset
])(&a
);
4391 unlock_fsnode(vp
, &funnel_state
);
4400 *#% blktooff vp = = =
4403 struct vnop_blktooff_args
{
4404 struct vnodeop_desc
*a_desc
;
4411 VNOP_BLKTOOFF(struct vnode
*vp
, daddr64_t lblkno
, off_t
*offset
)
4414 struct vnop_blktooff_args a
;
4416 int funnel_state
= 0;
4418 a
.a_desc
= &vnop_blktooff_desc
;
4420 a
.a_lblkno
= lblkno
;
4421 a
.a_offset
= offset
;
4422 thread_safe
= THREAD_SAFE_FS(vp
);
4425 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4427 _err
= (*vp
->v_op
[vnop_blktooff_desc
.vdesc_offset
])(&a
);
4429 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4437 *#% offtoblk vp = = =
4440 struct vnop_offtoblk_args
{
4441 struct vnodeop_desc
*a_desc
;
4444 daddr64_t
*a_lblkno
;
4448 VNOP_OFFTOBLK(struct vnode
*vp
, off_t offset
, daddr64_t
*lblkno
)
4451 struct vnop_offtoblk_args a
;
4453 int funnel_state
= 0;
4455 a
.a_desc
= &vnop_offtoblk_desc
;
4457 a
.a_offset
= offset
;
4458 a
.a_lblkno
= lblkno
;
4459 thread_safe
= THREAD_SAFE_FS(vp
);
4462 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4464 _err
= (*vp
->v_op
[vnop_offtoblk_desc
.vdesc_offset
])(&a
);
4466 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4474 *#% blockmap vp L L L
4477 struct vnop_blockmap_args
{
4478 struct vnodeop_desc
*a_desc
;
4486 vfs_context_t a_context
;
4490 VNOP_BLOCKMAP(struct vnode
*vp
, off_t foffset
, size_t size
, daddr64_t
*bpn
, size_t *run
, void *poff
, int flags
, vfs_context_t context
)
4493 struct vnop_blockmap_args a
;
4495 int funnel_state
= 0;
4496 struct vfs_context acontext
;
4498 if (context
== NULL
) {
4499 acontext
.vc_proc
= current_proc();
4500 acontext
.vc_ucred
= kauth_cred_get();
4501 context
= &acontext
;
4503 a
.a_desc
= &vnop_blockmap_desc
;
4505 a
.a_foffset
= foffset
;
4511 a
.a_context
= context
;
4512 thread_safe
= THREAD_SAFE_FS(vp
);
4515 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4517 _err
= (*vp
->v_op
[vnop_blockmap_desc
.vdesc_offset
])(&a
);
4519 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4525 struct vnop_strategy_args
{
4526 struct vnodeop_desc
*a_desc
;
4532 VNOP_STRATEGY(struct buf
*bp
)
4535 struct vnop_strategy_args a
;
4536 a
.a_desc
= &vnop_strategy_desc
;
4538 _err
= (*buf_vnode(bp
)->v_op
[vnop_strategy_desc
.vdesc_offset
])(&a
);
4543 struct vnop_bwrite_args
{
4544 struct vnodeop_desc
*a_desc
;
4549 VNOP_BWRITE(struct buf
*bp
)
4552 struct vnop_bwrite_args a
;
4553 a
.a_desc
= &vnop_bwrite_desc
;
4555 _err
= (*buf_vnode(bp
)->v_op
[vnop_bwrite_desc
.vdesc_offset
])(&a
);
4560 struct vnop_kqfilt_add_args
{
4561 struct vnodeop_desc
*a_desc
;
4564 vfs_context_t a_context
;
4568 VNOP_KQFILT_ADD(struct vnode
*vp
, struct knote
*kn
, vfs_context_t context
)
4571 struct vnop_kqfilt_add_args a
;
4573 int funnel_state
= 0;
4575 a
.a_desc
= VDESC(vnop_kqfilt_add
);
4578 a
.a_context
= context
;
4579 thread_safe
= THREAD_SAFE_FS(vp
);
4582 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4586 _err
= (*vp
->v_op
[vnop_kqfilt_add_desc
.vdesc_offset
])(&a
);
4588 unlock_fsnode(vp
, &funnel_state
);
4594 struct vnop_kqfilt_remove_args
{
4595 struct vnodeop_desc
*a_desc
;
4598 vfs_context_t a_context
;
4602 VNOP_KQFILT_REMOVE(struct vnode
*vp
, uintptr_t ident
, vfs_context_t context
)
4605 struct vnop_kqfilt_remove_args a
;
4607 int funnel_state
= 0;
4609 a
.a_desc
= VDESC(vnop_kqfilt_remove
);
4612 a
.a_context
= context
;
4613 thread_safe
= THREAD_SAFE_FS(vp
);
4616 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4620 _err
= (*vp
->v_op
[vnop_kqfilt_remove_desc
.vdesc_offset
])(&a
);
4622 unlock_fsnode(vp
, &funnel_state
);