2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
32 * Copyright (c) 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 * (c) UNIX System Laboratories, Inc.
35 * All or some portions of this file are derived from material licensed
36 * to the University of California by American Telephone and Telegraph
37 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
38 * the permission of UNIX System Laboratories, Inc.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * External virtual filesystem routines
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/proc_internal.h>
81 #include <sys/kauth.h>
82 #include <sys/mount.h>
83 #include <sys/mount_internal.h>
85 #include <sys/vnode_internal.h>
87 #include <sys/namei.h>
88 #include <sys/ucred.h>
90 #include <sys/errno.h>
91 #include <sys/malloc.h>
92 #include <sys/domain.h>
94 #include <sys/syslog.h>
97 #include <sys/sysctl.h>
98 #include <sys/filedesc.h>
99 #include <sys/fsevents.h>
100 #include <sys/user.h>
101 #include <sys/lockf.h>
102 #include <sys/xattr.h>
104 #include <kern/assert.h>
105 #include <kern/kalloc.h>
107 #include <miscfs/specfs/specdev.h>
109 #include <mach/mach_types.h>
110 #include <mach/memory_object_types.h>
119 #define THREAD_SAFE_FS(VP) \
120 ((VP)->v_unsafefs ? 0 : 1)
122 #define NATIVE_XATTR(VP) \
123 ((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)
125 static void xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t context
,
126 int thread_safe
, int force
);
127 static void xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
128 vfs_context_t context
, int thread_safe
);
132 vnode_setneedinactive(vnode_t vp
)
137 vp
->v_lflag
|= VL_NEEDINACTIVE
;
143 lock_fsnode(vnode_t vp
, int *funnel_state
)
146 *funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
148 if (vp
->v_unsafefs
) {
149 if (vp
->v_unsafefs
->fsnodeowner
== current_thread()) {
150 vp
->v_unsafefs
->fsnode_count
++;
152 lck_mtx_lock(&vp
->v_unsafefs
->fsnodelock
);
154 if (vp
->v_lflag
& (VL_TERMWANT
| VL_TERMINATE
| VL_DEAD
)) {
155 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
158 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
161 vp
->v_unsafefs
->fsnodeowner
= current_thread();
162 vp
->v_unsafefs
->fsnode_count
= 1;
170 unlock_fsnode(vnode_t vp
, int *funnel_state
)
172 if (vp
->v_unsafefs
) {
173 if (--vp
->v_unsafefs
->fsnode_count
== 0) {
174 vp
->v_unsafefs
->fsnodeowner
= NULL
;
175 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
179 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
184 /* ====================================================================== */
185 /* ************ EXTERNAL KERNEL APIS ********************************** */
186 /* ====================================================================== */
189 * prototypes for exported VFS operations
192 VFS_MOUNT(struct mount
* mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t context
)
196 int funnel_state
= 0;
198 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_mount
== 0))
201 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
205 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
208 if (vfs_context_is64bit(context
)) {
209 if (vfs_64bitready(mp
)) {
210 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, context
);
217 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, context
);
221 (void) thread_funnel_set(kernel_flock
, funnel_state
);
227 VFS_START(struct mount
* mp
, int flags
, vfs_context_t context
)
231 int funnel_state
= 0;
233 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_start
== 0))
236 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
239 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
241 error
= (*mp
->mnt_op
->vfs_start
)(mp
, flags
, context
);
243 (void) thread_funnel_set(kernel_flock
, funnel_state
);
249 VFS_UNMOUNT(struct mount
*mp
, int flags
, vfs_context_t context
)
253 int funnel_state
= 0;
255 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_unmount
== 0))
258 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
261 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
263 error
= (*mp
->mnt_op
->vfs_unmount
)(mp
, flags
, context
);
265 (void) thread_funnel_set(kernel_flock
, funnel_state
);
271 VFS_ROOT(struct mount
* mp
, struct vnode
** vpp
, vfs_context_t context
)
275 int funnel_state
= 0;
276 struct vfs_context acontext
;
278 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_root
== 0))
281 if (context
== NULL
) {
282 acontext
.vc_proc
= current_proc();
283 acontext
.vc_ucred
= kauth_cred_get();
286 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
289 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
291 error
= (*mp
->mnt_op
->vfs_root
)(mp
, vpp
, context
);
293 (void) thread_funnel_set(kernel_flock
, funnel_state
);
299 VFS_QUOTACTL(struct mount
*mp
, int cmd
, uid_t uid
, caddr_t datap
, vfs_context_t context
)
303 int funnel_state
= 0;
305 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_quotactl
== 0))
308 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
311 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
313 error
= (*mp
->mnt_op
->vfs_quotactl
)(mp
, cmd
, uid
, datap
, context
);
315 (void) thread_funnel_set(kernel_flock
, funnel_state
);
321 VFS_GETATTR(struct mount
*mp
, struct vfs_attr
*vfa
, vfs_context_t context
)
325 int funnel_state
= 0;
326 struct vfs_context acontext
;
328 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_getattr
== 0))
331 if (context
== NULL
) {
332 acontext
.vc_proc
= current_proc();
333 acontext
.vc_ucred
= kauth_cred_get();
336 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
339 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
341 error
= (*mp
->mnt_op
->vfs_getattr
)(mp
, vfa
, context
);
343 (void) thread_funnel_set(kernel_flock
, funnel_state
);
349 VFS_SETATTR(struct mount
*mp
, struct vfs_attr
*vfa
, vfs_context_t context
)
353 int funnel_state
= 0;
354 struct vfs_context acontext
;
356 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_setattr
== 0))
359 if (context
== NULL
) {
360 acontext
.vc_proc
= current_proc();
361 acontext
.vc_ucred
= kauth_cred_get();
364 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
367 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
369 error
= (*mp
->mnt_op
->vfs_setattr
)(mp
, vfa
, context
);
371 (void) thread_funnel_set(kernel_flock
, funnel_state
);
377 VFS_SYNC(struct mount
*mp
, int flags
, vfs_context_t context
)
381 int funnel_state
= 0;
382 struct vfs_context acontext
;
384 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_sync
== 0))
387 if (context
== NULL
) {
388 acontext
.vc_proc
= current_proc();
389 acontext
.vc_ucred
= kauth_cred_get();
392 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
395 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
397 error
= (*mp
->mnt_op
->vfs_sync
)(mp
, flags
, context
);
399 (void) thread_funnel_set(kernel_flock
, funnel_state
);
405 VFS_VGET(struct mount
* mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t context
)
409 int funnel_state
= 0;
410 struct vfs_context acontext
;
412 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_vget
== 0))
415 if (context
== NULL
) {
416 acontext
.vc_proc
= current_proc();
417 acontext
.vc_ucred
= kauth_cred_get();
420 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
423 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
425 error
= (*mp
->mnt_op
->vfs_vget
)(mp
, ino
, vpp
, context
);
427 (void) thread_funnel_set(kernel_flock
, funnel_state
);
433 VFS_FHTOVP(struct mount
* mp
, int fhlen
, unsigned char * fhp
, vnode_t
* vpp
, vfs_context_t context
)
437 int funnel_state
= 0;
438 struct vfs_context acontext
;
440 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_fhtovp
== 0))
443 if (context
== NULL
) {
444 acontext
.vc_proc
= current_proc();
445 acontext
.vc_ucred
= kauth_cred_get();
448 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
451 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
453 error
= (*mp
->mnt_op
->vfs_fhtovp
)(mp
, fhlen
, fhp
, vpp
, context
);
455 (void) thread_funnel_set(kernel_flock
, funnel_state
);
461 VFS_VPTOFH(struct vnode
* vp
, int *fhlenp
, unsigned char * fhp
, vfs_context_t context
)
465 int funnel_state
= 0;
466 struct vfs_context acontext
;
468 if ((vp
->v_mount
== dead_mountp
) || (vp
->v_mount
->mnt_op
->vfs_vptofh
== 0))
471 if (context
== NULL
) {
472 acontext
.vc_proc
= current_proc();
473 acontext
.vc_ucred
= kauth_cred_get();
476 thread_safe
= THREAD_SAFE_FS(vp
);
479 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
481 error
= (*vp
->v_mount
->mnt_op
->vfs_vptofh
)(vp
, fhlenp
, fhp
, context
);
483 (void) thread_funnel_set(kernel_flock
, funnel_state
);
489 /* returns a copy of vfs type name for the mount_t */
491 vfs_name(mount_t mp
, char * buffer
)
493 strncpy(buffer
, mp
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
496 /* returns vfs type number for the mount_t */
498 vfs_typenum(mount_t mp
)
500 return(mp
->mnt_vtable
->vfc_typenum
);
504 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
506 vfs_flags(mount_t mp
)
508 return((uint64_t)(mp
->mnt_flag
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
)));
511 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
513 vfs_setflags(mount_t mp
, uint64_t flags
)
515 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
517 mp
->mnt_flag
|= lflags
;
520 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
522 vfs_clearflags(mount_t mp
, uint64_t flags
)
524 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
526 mp
->mnt_flag
&= ~lflags
;
529 /* Is the mount_t ronly and upgrade read/write requested? */
531 vfs_iswriteupgrade(mount_t mp
) /* ronly && MNTK_WANTRDWR */
533 return ((mp
->mnt_flag
& MNT_RDONLY
) && (mp
->mnt_kern_flag
& MNTK_WANTRDWR
));
537 /* Is the mount_t mounted ronly */
539 vfs_isrdonly(mount_t mp
)
541 return (mp
->mnt_flag
& MNT_RDONLY
);
544 /* Is the mount_t mounted for filesystem synchronous writes? */
546 vfs_issynchronous(mount_t mp
)
548 return (mp
->mnt_flag
& MNT_SYNCHRONOUS
);
551 /* Is the mount_t mounted read/write? */
553 vfs_isrdwr(mount_t mp
)
555 return ((mp
->mnt_flag
& MNT_RDONLY
) == 0);
559 /* Is mount_t marked for update (ie MNT_UPDATE) */
561 vfs_isupdate(mount_t mp
)
563 return (mp
->mnt_flag
& MNT_UPDATE
);
567 /* Is mount_t marked for reload (ie MNT_RELOAD) */
569 vfs_isreload(mount_t mp
)
571 return ((mp
->mnt_flag
& MNT_UPDATE
) && (mp
->mnt_flag
& MNT_RELOAD
));
574 /* Is mount_t marked for reload (ie MNT_FORCE) */
576 vfs_isforce(mount_t mp
)
578 if ((mp
->mnt_flag
& MNT_FORCE
) || (mp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
))
585 vfs_64bitready(mount_t mp
)
587 if ((mp
->mnt_vtable
->vfc_64bitready
))
594 vfs_authopaque(mount_t mp
)
596 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
))
603 vfs_authopaqueaccess(mount_t mp
)
605 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE_ACCESS
))
612 vfs_setauthopaque(mount_t mp
)
615 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE
;
620 vfs_setauthopaqueaccess(mount_t mp
)
623 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE_ACCESS
;
628 vfs_clearauthopaque(mount_t mp
)
631 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE
;
636 vfs_clearauthopaqueaccess(mount_t mp
)
639 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE_ACCESS
;
644 vfs_setextendedsecurity(mount_t mp
)
647 mp
->mnt_kern_flag
|= MNTK_EXTENDED_SECURITY
;
652 vfs_clearextendedsecurity(mount_t mp
)
655 mp
->mnt_kern_flag
&= ~MNTK_EXTENDED_SECURITY
;
660 vfs_extendedsecurity(mount_t mp
)
662 return(mp
->mnt_kern_flag
& MNTK_EXTENDED_SECURITY
);
665 /* returns the max size of short symlink in this mount_t */
667 vfs_maxsymlen(mount_t mp
)
669 return(mp
->mnt_maxsymlinklen
);
672 /* set max size of short symlink on mount_t */
674 vfs_setmaxsymlen(mount_t mp
, uint32_t symlen
)
676 mp
->mnt_maxsymlinklen
= symlen
;
679 /* return a pointer to the RO vfs_statfs associated with mount_t */
681 vfs_statfs(mount_t mp
)
683 return(&mp
->mnt_vfsstat
);
687 vfs_getattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
692 if ((error
= VFS_GETATTR(mp
, vfa
, ctx
)) != 0)
696 * If we have a filesystem create time, use it to default some others.
698 if (VFSATTR_IS_SUPPORTED(vfa
, f_create_time
)) {
699 if (VFSATTR_IS_ACTIVE(vfa
, f_modify_time
) && !VFSATTR_IS_SUPPORTED(vfa
, f_modify_time
))
700 VFSATTR_RETURN(vfa
, f_modify_time
, vfa
->f_create_time
);
707 vfs_setattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
711 if (vfs_isrdonly(mp
))
714 error
= VFS_SETATTR(mp
, vfa
, ctx
);
717 * If we had alternate ways of setting vfs attributes, we'd
724 /* return the private data handle stored in mount_t */
726 vfs_fsprivate(mount_t mp
)
728 return(mp
->mnt_data
);
731 /* set the private data handle in mount_t */
733 vfs_setfsprivate(mount_t mp
, void *mntdata
)
735 mp
->mnt_data
= mntdata
;
740 * return the block size of the underlying
741 * device associated with mount_t
744 vfs_devblocksize(mount_t mp
) {
746 return(mp
->mnt_devblocksize
);
751 * return the io attributes associated with mount_t
754 vfs_ioattr(mount_t mp
, struct vfsioattr
*ioattrp
)
757 ioattrp
->io_maxreadcnt
= MAXPHYS
;
758 ioattrp
->io_maxwritecnt
= MAXPHYS
;
759 ioattrp
->io_segreadcnt
= 32;
760 ioattrp
->io_segwritecnt
= 32;
761 ioattrp
->io_maxsegreadsize
= MAXPHYS
;
762 ioattrp
->io_maxsegwritesize
= MAXPHYS
;
763 ioattrp
->io_devblocksize
= DEV_BSIZE
;
765 ioattrp
->io_maxreadcnt
= mp
->mnt_maxreadcnt
;
766 ioattrp
->io_maxwritecnt
= mp
->mnt_maxwritecnt
;
767 ioattrp
->io_segreadcnt
= mp
->mnt_segreadcnt
;
768 ioattrp
->io_segwritecnt
= mp
->mnt_segwritecnt
;
769 ioattrp
->io_maxsegreadsize
= mp
->mnt_maxsegreadsize
;
770 ioattrp
->io_maxsegwritesize
= mp
->mnt_maxsegwritesize
;
771 ioattrp
->io_devblocksize
= mp
->mnt_devblocksize
;
773 ioattrp
->io_reserved
[0] = 0;
774 ioattrp
->io_reserved
[1] = 0;
775 ioattrp
->io_reserved
[2] = 0;
780 * set the IO attributes associated with mount_t
783 vfs_setioattr(mount_t mp
, struct vfsioattr
* ioattrp
)
787 mp
->mnt_maxreadcnt
= ioattrp
->io_maxreadcnt
;
788 mp
->mnt_maxwritecnt
= ioattrp
->io_maxwritecnt
;
789 mp
->mnt_segreadcnt
= ioattrp
->io_segreadcnt
;
790 mp
->mnt_segwritecnt
= ioattrp
->io_segwritecnt
;
791 mp
->mnt_maxsegreadsize
= ioattrp
->io_maxsegreadsize
;
792 mp
->mnt_maxsegwritesize
= ioattrp
->io_maxsegwritesize
;
793 mp
->mnt_devblocksize
= ioattrp
->io_devblocksize
;
797 * Add a new filesystem into the kernel specified in passed in
798 * vfstable structure. It fills in the vnode
799 * dispatch vector that is to be passed to when vnodes are created.
800 * It returns a handle which is to be used to when the FS is to be removed
802 typedef int (*PFI
)(void *);
803 extern int vfs_opv_numops
;
805 vfs_fsadd(struct vfs_fsentry
*vfe
, vfstable_t
* handle
)
808 struct vfstable
*newvfstbl
= NULL
;
810 int (***opv_desc_vector_p
)(void *);
811 int (**opv_desc_vector
)(void *);
812 struct vnodeopv_entry_desc
*opve_descp
;
818 * This routine is responsible for all the initialization that would
819 * ordinarily be done as part of the system startup;
822 if (vfe
== (struct vfs_fsentry
*)0)
825 desccount
= vfe
->vfe_vopcnt
;
826 if ((desccount
<=0) || ((desccount
> 5)) || (vfe
->vfe_vfsops
== (struct vfsops
*)NULL
)
827 || (vfe
->vfe_opvdescs
== (struct vnodeopv_desc
**)NULL
))
831 MALLOC(newvfstbl
, void *, sizeof(struct vfstable
), M_TEMP
,
833 bzero(newvfstbl
, sizeof(struct vfstable
));
834 newvfstbl
->vfc_vfsops
= vfe
->vfe_vfsops
;
835 strncpy(&newvfstbl
->vfc_name
[0], vfe
->vfe_fsname
, MFSNAMELEN
);
836 if ((vfe
->vfe_flags
& VFS_TBLNOTYPENUM
))
837 newvfstbl
->vfc_typenum
= maxvfsconf
++;
839 newvfstbl
->vfc_typenum
= vfe
->vfe_fstypenum
;
841 newvfstbl
->vfc_refcount
= 0;
842 newvfstbl
->vfc_flags
= 0;
843 newvfstbl
->vfc_mountroot
= NULL
;
844 newvfstbl
->vfc_next
= NULL
;
845 newvfstbl
->vfc_threadsafe
= 0;
846 newvfstbl
->vfc_vfsflags
= 0;
847 if (vfe
->vfe_flags
& VFS_TBL64BITREADY
)
848 newvfstbl
->vfc_64bitready
= 1;
849 if (vfe
->vfe_flags
& VFS_TBLTHREADSAFE
)
850 newvfstbl
->vfc_threadsafe
= 1;
851 if (vfe
->vfe_flags
& VFS_TBLFSNODELOCK
)
852 newvfstbl
->vfc_threadsafe
= 1;
853 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) == VFS_TBLLOCALVOL
)
854 newvfstbl
->vfc_flags
|= MNT_LOCAL
;
855 if (vfe
->vfe_flags
& VFS_TBLLOCALVOL
)
856 newvfstbl
->vfc_vfsflags
|= VFC_VFSLOCALARGS
;
858 newvfstbl
->vfc_vfsflags
|= VFC_VFSGENERICARGS
;
862 * Allocate and init the vectors.
863 * Also handle backwards compatibility.
865 * We allocate one large block to hold all <desccount>
866 * vnode operation vectors stored contiguously.
868 /* XXX - shouldn't be M_TEMP */
870 descsize
= desccount
* vfs_opv_numops
* sizeof(PFI
);
871 MALLOC(descptr
, PFI
*, descsize
,
873 bzero(descptr
, descsize
);
875 newvfstbl
->vfc_descptr
= descptr
;
876 newvfstbl
->vfc_descsize
= descsize
;
879 for (i
= 0; i
< desccount
; i
++ ) {
880 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
882 * Fill in the caller's pointer to the start of the i'th vector.
883 * They'll need to supply it when calling vnode_create.
885 opv_desc_vector
= descptr
+ i
* vfs_opv_numops
;
886 *opv_desc_vector_p
= opv_desc_vector
;
888 for (j
= 0; vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
889 opve_descp
= &(vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
]);
892 * Sanity check: is this operation listed
893 * in the list of operations? We check this
894 * by seeing if its offest is zero. Since
895 * the default routine should always be listed
896 * first, it should be the only one with a zero
897 * offset. Any other operation with a zero
898 * offset is probably not listed in
899 * vfs_op_descs, and so is probably an error.
901 * A panic here means the layer programmer
902 * has committed the all-too common bug
903 * of adding a new operation to the layer's
904 * list of vnode operations but
905 * not adding the operation to the system-wide
906 * list of supported operations.
908 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
909 opve_descp
->opve_op
->vdesc_offset
!= VOFFSET(vnop_default
)) {
910 printf("vfs_fsadd: operation %s not listed in %s.\n",
911 opve_descp
->opve_op
->vdesc_name
,
913 panic("vfs_fsadd: bad operation");
916 * Fill in this entry.
918 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
919 opve_descp
->opve_impl
;
924 * Finally, go back and replace unfilled routines
925 * with their default. (Sigh, an O(n^3) algorithm. I
926 * could make it better, but that'd be work, and n is small.)
928 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
931 * Force every operations vector to have a default routine.
933 opv_desc_vector
= *opv_desc_vector_p
;
934 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
)
935 panic("vfs_fsadd: operation vector without default routine.");
936 for (j
= 0; j
< vfs_opv_numops
; j
++)
937 if (opv_desc_vector
[j
] == NULL
)
939 opv_desc_vector
[VOFFSET(vnop_default
)];
941 } /* end of each vnodeopv_desc parsing */
945 *handle
= vfstable_add(newvfstbl
);
947 if (newvfstbl
->vfc_typenum
<= maxvfsconf
)
948 maxvfsconf
= newvfstbl
->vfc_typenum
+ 1;
951 if (newvfstbl
->vfc_vfsops
->vfs_init
)
952 (*newvfstbl
->vfc_vfsops
->vfs_init
)((struct vfsconf
*)handle
);
954 FREE(newvfstbl
, M_TEMP
);
960 * Removes the filesystem from kernel.
961 * The argument passed in is the handle that was given when
962 * file system was added
965 vfs_fsremove(vfstable_t handle
)
967 struct vfstable
* vfstbl
= (struct vfstable
*)handle
;
968 void *old_desc
= NULL
;
971 /* Preflight check for any mounts */
973 if ( vfstbl
->vfc_refcount
!= 0 ) {
980 * save the old descriptor; the free cannot occur unconditionally,
981 * since vfstable_del() may fail.
983 if (vfstbl
->vfc_descptr
&& vfstbl
->vfc_descsize
) {
984 old_desc
= vfstbl
->vfc_descptr
;
986 err
= vfstable_del(vfstbl
);
988 /* free the descriptor if the delete was successful */
989 if (err
== 0 && old_desc
) {
990 FREE(old_desc
, M_TEMP
);
997 * This returns a reference to mount_t
998 * which should be dropped using vfs_mountrele().
999 * Not doing so will leak a mountpoint
1000 * and associated data structures.
1003 vfs_mountref(__unused mount_t mp
) /* gives a reference */
1008 /* This drops the reference on mount_t that was acquired */
1010 vfs_mountrele(__unused mount_t mp
) /* drops reference */
1016 vfs_context_pid(vfs_context_t context
)
1018 return (context
->vc_proc
->p_pid
);
1022 vfs_context_suser(vfs_context_t context
)
1024 return (suser(context
->vc_ucred
, 0));
1027 vfs_context_issignal(vfs_context_t context
, sigset_t mask
)
1029 if (context
->vc_proc
)
1030 return(proc_pendingsignals(context
->vc_proc
, mask
));
1035 vfs_context_is64bit(vfs_context_t context
)
1037 if (context
->vc_proc
)
1038 return(proc_is64bit(context
->vc_proc
));
1043 vfs_context_proc(vfs_context_t context
)
1045 return (context
->vc_proc
);
1049 vfs_context_create(vfs_context_t context
)
1051 struct vfs_context
* newcontext
;
1053 newcontext
= (struct vfs_context
*)kalloc(sizeof(struct vfs_context
));
1057 newcontext
->vc_proc
= context
->vc_proc
;
1058 newcontext
->vc_ucred
= context
->vc_ucred
;
1060 newcontext
->vc_proc
= proc_self();
1061 newcontext
->vc_ucred
= kauth_cred_get();
1065 return((vfs_context_t
)0);
1069 vfs_context_rele(vfs_context_t context
)
1072 kfree(context
, sizeof(struct vfs_context
));
1078 vfs_context_ucred(vfs_context_t context
)
1080 return (context
->vc_ucred
);
1084 * Return true if the context is owned by the superuser.
1087 vfs_context_issuser(vfs_context_t context
)
1089 return(context
->vc_ucred
->cr_uid
== 0);
1093 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1097 * Convert between vnode types and inode formats (since POSIX.1
1098 * defines mode word of stat structure in terms of inode formats).
1101 vnode_iftovt(int mode
)
1103 return(iftovt_tab
[((mode
) & S_IFMT
) >> 12]);
1107 vnode_vttoif(enum vtype indx
)
1109 return(vttoif_tab
[(int)(indx
)]);
/* Combine a vnode type and permission bits into an inode mode word. */
int
vnode_makeimode(int indx, int mode)
{
	return (int)(VTTOIF(indx) | (mode));
}
1120 * vnode manipulation functions.
1123 /* returns system root vnode reference; It should be dropped using vrele() */
1129 error
= vnode_get(rootvnode
);
1131 return ((vnode_t
)0);
1138 vnode_vid(vnode_t vp
)
1140 return ((uint32_t)(vp
->v_id
));
1143 /* returns a mount reference; drop it with vfs_mountrelease() */
1145 vnode_mount(vnode_t vp
)
1147 return (vp
->v_mount
);
1150 /* returns a mount reference iff vnode_t is a dir and is a mount point */
1152 vnode_mountedhere(vnode_t vp
)
1156 if ((vp
->v_type
== VDIR
) && ((mp
= vp
->v_mountedhere
) != NULL
) &&
1157 (mp
->mnt_vnodecovered
== vp
))
1160 return (mount_t
)NULL
;
1163 /* returns vnode type of vnode_t */
1165 vnode_vtype(vnode_t vp
)
1167 return (vp
->v_type
);
1170 /* returns FS specific node saved in vnode */
1172 vnode_fsnode(vnode_t vp
)
1174 return (vp
->v_data
);
1178 vnode_clearfsnode(vnode_t vp
)
1184 vnode_specrdev(vnode_t vp
)
1190 /* Accessor functions */
1191 /* is vnode_t a root vnode */
1193 vnode_isvroot(vnode_t vp
)
1195 return ((vp
->v_flag
& VROOT
)? 1 : 0);
1198 /* is vnode_t a system vnode */
1200 vnode_issystem(vnode_t vp
)
1202 return ((vp
->v_flag
& VSYSTEM
)? 1 : 0);
1205 /* if vnode_t mount operation in progress */
1207 vnode_ismount(vnode_t vp
)
1209 return ((vp
->v_flag
& VMOUNT
)? 1 : 0);
1212 /* is this vnode under recyle now */
1214 vnode_isrecycled(vnode_t vp
)
1219 ret
= (vp
->v_lflag
& (VL_TERMINATE
|VL_DEAD
))? 1 : 0;
1224 /* is vnode_t marked to not keep data cached once it's been consumed */
1226 vnode_isnocache(vnode_t vp
)
1228 return ((vp
->v_flag
& VNOCACHE_DATA
)? 1 : 0);
1232 * has sequential readahead been disabled on this vnode
1235 vnode_isnoreadahead(vnode_t vp
)
1237 return ((vp
->v_flag
& VRAOFF
)? 1 : 0);
1240 /* is vnode_t a standard one? */
1242 vnode_isstandard(vnode_t vp
)
1244 return ((vp
->v_flag
& VSTANDARD
)? 1 : 0);
1247 /* don't vflush() if SKIPSYSTEM */
1249 vnode_isnoflush(vnode_t vp
)
1251 return ((vp
->v_flag
& VNOFLUSH
)? 1 : 0);
1254 /* is vnode_t a regular file */
1256 vnode_isreg(vnode_t vp
)
1258 return ((vp
->v_type
== VREG
)? 1 : 0);
1261 /* is vnode_t a directory? */
1263 vnode_isdir(vnode_t vp
)
1265 return ((vp
->v_type
== VDIR
)? 1 : 0);
1268 /* is vnode_t a symbolic link ? */
1270 vnode_islnk(vnode_t vp
)
1272 return ((vp
->v_type
== VLNK
)? 1 : 0);
1275 /* is vnode_t a fifo ? */
1277 vnode_isfifo(vnode_t vp
)
1279 return ((vp
->v_type
== VFIFO
)? 1 : 0);
1282 /* is vnode_t a block device? */
1284 vnode_isblk(vnode_t vp
)
1286 return ((vp
->v_type
== VBLK
)? 1 : 0);
1289 /* is vnode_t a char device? */
1291 vnode_ischr(vnode_t vp
)
1293 return ((vp
->v_type
== VCHR
)? 1 : 0);
1296 /* is vnode_t a socket? */
1298 vnode_issock(vnode_t vp
)
1300 return ((vp
->v_type
== VSOCK
)? 1 : 0);
1304 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1306 vnode_setnocache(vnode_t vp
)
1309 vp
->v_flag
|= VNOCACHE_DATA
;
1314 vnode_clearnocache(vnode_t vp
)
1317 vp
->v_flag
&= ~VNOCACHE_DATA
;
1322 vnode_setnoreadahead(vnode_t vp
)
1325 vp
->v_flag
|= VRAOFF
;
1330 vnode_clearnoreadahead(vnode_t vp
)
1333 vp
->v_flag
&= ~VRAOFF
;
1338 /* mark vnode_t to skip vflush() is SKIPSYSTEM */
1340 vnode_setnoflush(vnode_t vp
)
1343 vp
->v_flag
|= VNOFLUSH
;
1348 vnode_clearnoflush(vnode_t vp
)
1351 vp
->v_flag
&= ~VNOFLUSH
;
1356 /* is vnode_t a blkdevice and has a FS mounted on it */
1358 vnode_ismountedon(vnode_t vp
)
1360 return ((vp
->v_specflags
& SI_MOUNTEDON
)? 1 : 0);
1364 vnode_setmountedon(vnode_t vp
)
1367 vp
->v_specflags
|= SI_MOUNTEDON
;
1372 vnode_clearmountedon(vnode_t vp
)
1375 vp
->v_specflags
&= ~SI_MOUNTEDON
;
1381 vnode_settag(vnode_t vp
, int tag
)
1388 vnode_tag(vnode_t vp
)
1394 vnode_parent(vnode_t vp
)
1397 return(vp
->v_parent
);
1401 vnode_setparent(vnode_t vp
, vnode_t dvp
)
1407 vnode_name(vnode_t vp
)
1409 /* we try to keep v_name a reasonable name for the node */
1414 vnode_setname(vnode_t vp
, char * name
)
1419 /* return the registered FS name when adding the FS to kernel */
1421 vnode_vfsname(vnode_t vp
, char * buf
)
1423 strncpy(buf
, vp
->v_mount
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
1426 /* return the FS type number */
1428 vnode_vfstypenum(vnode_t vp
)
1430 return(vp
->v_mount
->mnt_vtable
->vfc_typenum
);
1434 vnode_vfs64bitready(vnode_t vp
)
1437 if ((vp
->v_mount
->mnt_vtable
->vfc_64bitready
))
1445 /* return the visible flags on associated mount point of vnode_t */
1447 vnode_vfsvisflags(vnode_t vp
)
1449 return(vp
->v_mount
->mnt_flag
& MNT_VISFLAGMASK
);
1452 /* return the command modifier flags on associated mount point of vnode_t */
1454 vnode_vfscmdflags(vnode_t vp
)
1456 return(vp
->v_mount
->mnt_flag
& MNT_CMDFLAGS
);
1459 /* return the max symlink of short links of vnode_t */
1461 vnode_vfsmaxsymlen(vnode_t vp
)
1463 return(vp
->v_mount
->mnt_maxsymlinklen
);
1466 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1468 vnode_vfsstatfs(vnode_t vp
)
1470 return(&vp
->v_mount
->mnt_vfsstat
);
1473 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1475 vnode_vfsfsprivate(vnode_t vp
)
1477 return(vp
->v_mount
->mnt_data
);
1480 /* is vnode_t in a rdonly mounted FS */
1482 vnode_vfsisrdonly(vnode_t vp
)
1484 return ((vp
->v_mount
->mnt_flag
& MNT_RDONLY
)? 1 : 0);
1488 /* returns vnode ref to current working directory */
1490 current_workingdir(void)
1492 struct proc
*p
= current_proc();
1495 if ( (vp
= p
->p_fd
->fd_cdir
) ) {
1496 if ( (vnode_getwithref(vp
)) )
1502 /* returns vnode ref to current root(chroot) directory */
1504 current_rootdir(void)
1506 struct proc
*p
= current_proc();
1509 if ( (vp
= p
->p_fd
->fd_rdir
) ) {
1510 if ( (vnode_getwithref(vp
)) )
1517 vnode_get_filesec(vnode_t vp
, kauth_filesec_t
*fsecp
, vfs_context_t ctx
)
1519 kauth_filesec_t fsec
;
1522 size_t xsize
, rsize
;
1529 /* find out how big the EA is */
1530 if (vn_getxattr(vp
, KAUTH_FILESEC_XATTR
, NULL
, &xsize
, XATTR_NOSECURITY
, ctx
) != 0) {
1531 /* no EA, no filesec */
1532 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1534 /* either way, we are done */
1538 /* how many entries would fit? */
1539 fsec_size
= KAUTH_FILESEC_COUNT(xsize
);
1541 /* get buffer and uio */
1542 if (((fsec
= kauth_filesec_alloc(fsec_size
)) == NULL
) ||
1543 ((fsec_uio
= uio_create(1, 0, UIO_SYSSPACE
, UIO_READ
)) == NULL
) ||
1544 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), xsize
)) {
1545 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1550 /* read security attribute */
1552 if ((error
= vn_getxattr(vp
,
1553 KAUTH_FILESEC_XATTR
,
1559 /* no attribute - no security data */
1560 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1562 /* either way, we are done */
1567 * Validate security structure. If it's corrupt, we will
1570 if (rsize
< KAUTH_FILESEC_SIZE(0)) {
1571 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize
);
1574 if (fsec
->fsec_magic
!= KAUTH_FILESEC_MAGIC
) {
1575 KAUTH_DEBUG("ACL - BAD MAGIC %x", fsec
->fsec_magic
);
1578 if ((fsec
->fsec_acl
.acl_entrycount
!= KAUTH_FILESEC_NOACL
) &&
1579 (fsec
->fsec_acl
.acl_entrycount
> KAUTH_ACL_MAX_ENTRIES
)) {
1580 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", fsec
->fsec_entrycount
);
1583 if ((fsec
->fsec_acl
.acl_entrycount
!= KAUTH_FILESEC_NOACL
) &&
1584 (KAUTH_FILESEC_SIZE(fsec
->fsec_acl
.acl_entrycount
) > rsize
)) {
1585 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", fsec
->fsec_acl
.acl_entrycount
, rsize
);
1594 kauth_filesec_free(fsec
);
1595 if (fsec_uio
!= NULL
)
/*
 * Writes extended security data (filesec header + ACL) to vp as the
 * KAUTH_FILESEC_XATTR extended attribute.
 *
 * A two-iovec uio is built: the first iovec covers the kauth_filesec
 * header (sizeof(struct kauth_filesec) minus the trailing kauth_acl),
 * the second covers the ACL itself, sized by KAUTH_ACL_COPYSIZE(acl).
 * The pair is then handed to vn_setxattr() with XATTR_NOSECURITY
 * (authorization has already been performed by the caller, per the
 * inline comment below).
 * NOTE(review): the declarations of fsec_uio/error, the error-exit
 * path after the failed uio_create(), the remaining vn_setxattr()
 * arguments, and the uio_free() under the trailing NULL check are
 * dropped from this extract — confirm against the full source.
 */
1603 vnode_set_filesec(vnode_t vp
, kauth_filesec_t fsec
, kauth_acl_t acl
, vfs_context_t ctx
)
/* allocate a 2-segment write uio; bail out if allocation fails */
1610 if ((fsec_uio
= uio_create(2, 0, UIO_SYSSPACE
, UIO_WRITE
)) == NULL
) {
1611 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
/* iovec 1: filesec header only (ACL portion excluded) */
1615 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), sizeof(struct kauth_filesec
) - sizeof(struct kauth_acl
));
/* iovec 2: the ACL body, trimmed to its actual copy size */
1616 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(acl
), KAUTH_ACL_COPYSIZE(acl
));
1617 error
= vn_setxattr(vp
,
1618 KAUTH_FILESEC_XATTR
,
1620 XATTR_NOSECURITY
, /* we have auth'ed already */
1622 VFS_DEBUG(ctx
, vp
, "SETATTR - set ACL returning %d", error
);
/* release the uio if it was created (free call dropped from extract) */
1625 if (fsec_uio
!= NULL
)
1632 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1634 kauth_filesec_t fsec
;
1640 /* don't ask for extended security data if the filesystem doesn't support it */
1641 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
1642 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
1643 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1644 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1648 * If the caller wants size values we might have to synthesise, give the
1649 * filesystem the opportunity to supply better intermediate results.
1651 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
1652 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
1653 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
1654 VATTR_SET_ACTIVE(vap
, va_data_size
);
1655 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
1656 VATTR_SET_ACTIVE(vap
, va_total_size
);
1657 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
1660 error
= VNOP_GETATTR(vp
, vap
, ctx
);
1662 KAUTH_DEBUG("ERROR - returning %d", error
);
1667 * If extended security data was requested but not returned, try the fallback
1670 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
1673 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
1674 /* try to get the filesec */
1675 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
1678 /* if no filesec, no attributes */
1680 VATTR_RETURN(vap
, va_acl
, NULL
);
1681 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
1682 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
1685 /* looks good, try to return what we were asked for */
1686 VATTR_RETURN(vap
, va_uuuid
, fsec
->fsec_owner
);
1687 VATTR_RETURN(vap
, va_guuid
, fsec
->fsec_group
);
1689 /* only return the ACL if we were actually asked for it */
1690 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
1691 if (fsec
->fsec_acl
.acl_entrycount
== KAUTH_FILESEC_NOACL
) {
1692 VATTR_RETURN(vap
, va_acl
, NULL
);
1694 facl
= kauth_acl_alloc(fsec
->fsec_acl
.acl_entrycount
);
1696 kauth_filesec_free(fsec
);
1700 bcopy(&fsec
->fsec_acl
, facl
, KAUTH_ACL_COPYSIZE(&fsec
->fsec_acl
));
1701 VATTR_RETURN(vap
, va_acl
, facl
);
1704 kauth_filesec_free(fsec
);
1708 * If someone gave us an unsolicited filesec, toss it. We promise that
1709 * we're OK with a filesystem giving us anything back, but our callers
1710 * only expect what they asked for.
1712 if (VATTR_IS_SUPPORTED(vap
, va_acl
) && !VATTR_IS_ACTIVE(vap
, va_acl
)) {
1713 if (vap
->va_acl
!= NULL
)
1714 kauth_acl_free(vap
->va_acl
);
1715 VATTR_CLEAR_SUPPORTED(vap
, va_acl
);
1718 #if 0 /* enable when we have a filesystem only supporting UUIDs */
1720 * Handle the case where we need a UID/GID, but only have extended
1721 * security information.
1723 if (VATTR_NOT_RETURNED(vap
, va_uid
) &&
1724 VATTR_IS_SUPPORTED(vap
, va_uuuid
) &&
1725 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
1726 if ((error
= kauth_cred_guid2uid(&vap
->va_uuuid
, &nuid
)) == 0)
1727 VATTR_RETURN(vap
, va_uid
, nuid
);
1729 if (VATTR_NOT_RETURNED(vap
, va_gid
) &&
1730 VATTR_IS_SUPPORTED(vap
, va_guuid
) &&
1731 !kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
1732 if ((error
= kauth_cred_guid2gid(&vap
->va_guuid
, &ngid
)) == 0)
1733 VATTR_RETURN(vap
, va_gid
, ngid
);
1738 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
1740 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
1741 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1742 nuid
= vp
->v_mount
->mnt_fsowner
;
1743 if (nuid
== KAUTH_UID_NONE
)
1745 } else if (VATTR_IS_SUPPORTED(vap
, va_uid
)) {
1748 /* this will always be something sensible */
1749 nuid
= vp
->v_mount
->mnt_fsowner
;
1751 if ((nuid
== 99) && !vfs_context_issuser(ctx
))
1752 nuid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
1753 VATTR_RETURN(vap
, va_uid
, nuid
);
1755 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
1756 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1757 ngid
= vp
->v_mount
->mnt_fsgroup
;
1758 if (ngid
== KAUTH_GID_NONE
)
1760 } else if (VATTR_IS_SUPPORTED(vap
, va_gid
)) {
1763 /* this will always be something sensible */
1764 ngid
= vp
->v_mount
->mnt_fsgroup
;
1766 if ((ngid
== 99) && !vfs_context_issuser(ctx
))
1767 ngid
= kauth_cred_getgid(vfs_context_ucred(ctx
));
1768 VATTR_RETURN(vap
, va_gid
, ngid
);
1772 * Synthesise some values that can be reasonably guessed.
1774 if (!VATTR_IS_SUPPORTED(vap
, va_iosize
))
1775 VATTR_RETURN(vap
, va_iosize
, vp
->v_mount
->mnt_vfsstat
.f_iosize
);
1777 if (!VATTR_IS_SUPPORTED(vap
, va_flags
))
1778 VATTR_RETURN(vap
, va_flags
, 0);
1780 if (!VATTR_IS_SUPPORTED(vap
, va_filerev
))
1781 VATTR_RETURN(vap
, va_filerev
, 0);
1783 if (!VATTR_IS_SUPPORTED(vap
, va_gen
))
1784 VATTR_RETURN(vap
, va_gen
, 0);
1787 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
1789 if (!VATTR_IS_SUPPORTED(vap
, va_data_size
))
1790 VATTR_RETURN(vap
, va_data_size
, 0);
1792 /* do we want any of the possibly-computed values? */
1793 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
1794 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
1795 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
1796 /* make sure f_bsize is valid */
1797 if (vp
->v_mount
->mnt_vfsstat
.f_bsize
== 0) {
1798 if ((error
= vfs_update_vfsstat(vp
->v_mount
, ctx
)) != 0)
1802 /* default va_data_alloc from va_data_size */
1803 if (!VATTR_IS_SUPPORTED(vap
, va_data_alloc
))
1804 VATTR_RETURN(vap
, va_data_alloc
, roundup(vap
->va_data_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
1806 /* default va_total_size from va_data_size */
1807 if (!VATTR_IS_SUPPORTED(vap
, va_total_size
))
1808 VATTR_RETURN(vap
, va_total_size
, vap
->va_data_size
);
1810 /* default va_total_alloc from va_total_size which is guaranteed at this point */
1811 if (!VATTR_IS_SUPPORTED(vap
, va_total_alloc
))
1812 VATTR_RETURN(vap
, va_total_alloc
, roundup(vap
->va_total_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
1816 * If we don't have a change time, pull it from the modtime.
1818 if (!VATTR_IS_SUPPORTED(vap
, va_change_time
) && VATTR_IS_SUPPORTED(vap
, va_modify_time
))
1819 VATTR_RETURN(vap
, va_change_time
, vap
->va_modify_time
);
1822 * This is really only supported for the creation VNOPs, but since the field is there
1823 * we should populate it correctly.
1825 VATTR_RETURN(vap
, va_type
, vp
->v_type
);
1828 * The fsid can be obtained from the mountpoint directly.
1830 VATTR_RETURN(vap
, va_fsid
, vp
->v_mount
->mnt_vfsstat
.f_fsid
.val
[0]);
1838 vnode_setattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1840 int error
, is_ownership_change
=0;
1843 * Make sure the filesystem is mounted R/W.
1844 * If not, return an error.
1846 if (vfs_isrdonly(vp
->v_mount
))
1850 * If ownership is being ignored on this volume, we silently discard
1851 * ownership changes.
1853 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1854 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1855 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1858 if (VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
)) {
1859 is_ownership_change
= 1;
1863 * Make sure that extended security is enabled if we're going to try
1866 if (!vfs_extendedsecurity(vnode_mount(vp
)) &&
1867 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
1868 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
1872 error
= VNOP_SETATTR(vp
, vap
, ctx
);
1874 if ((error
== 0) && !VATTR_ALL_SUPPORTED(vap
))
1875 error
= vnode_setattr_fallback(vp
, vap
, ctx
);
1878 * If we have changed any of the things about the file that are likely
1879 * to result in changes to authorisation results, blow the vnode auth
1882 if (VATTR_IS_SUPPORTED(vap
, va_mode
) ||
1883 VATTR_IS_SUPPORTED(vap
, va_uid
) ||
1884 VATTR_IS_SUPPORTED(vap
, va_gid
) ||
1885 VATTR_IS_SUPPORTED(vap
, va_flags
) ||
1886 VATTR_IS_SUPPORTED(vap
, va_acl
) ||
1887 VATTR_IS_SUPPORTED(vap
, va_uuuid
) ||
1888 VATTR_IS_SUPPORTED(vap
, va_guuid
))
1889 vnode_uncache_credentials(vp
);
1890 // only send a stat_changed event if this is more than
1891 // just an access time update
1892 if (error
== 0 && (vap
->va_active
!= VNODE_ATTR_BIT(va_access_time
))) {
1893 if (need_fsevent(FSE_STAT_CHANGED
, vp
) || (is_ownership_change
&& need_fsevent(FSE_CHOWN
, vp
))) {
1894 if (is_ownership_change
== 0)
1895 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
1897 add_fsevent(FSE_CHOWN
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
1904 * Following an operation which sets attributes (setattr, create, etc.) we may
1905 * need to perform fallback operations to get attributes saved.
1908 vnode_setattr_fallback(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1910 kauth_filesec_t fsec
;
1912 struct kauth_filesec lfsec
;
1918 * Extended security fallback via extended attributes.
1920 * Note that we do not free the filesec; the caller is expected to do this.
1922 if (VATTR_NOT_RETURNED(vap
, va_acl
) ||
1923 VATTR_NOT_RETURNED(vap
, va_uuuid
) ||
1924 VATTR_NOT_RETURNED(vap
, va_guuid
)) {
1925 VFS_DEBUG(ctx
, vp
, "SETATTR - doing filesec fallback");
1928 * Fail for file types that we don't permit extended security to be set on.
1930 if ((vp
->v_type
!= VDIR
) && (vp
->v_type
!= VLNK
) && (vp
->v_type
!= VREG
)) {
1931 VFS_DEBUG(ctx
, vp
, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp
));
1937 * If we don't have all the extended security items, we need to fetch the existing
1938 * data to perform a read-modify-write operation.
1941 if (!VATTR_IS_ACTIVE(vap
, va_acl
) ||
1942 !VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
1943 !VATTR_IS_ACTIVE(vap
, va_guuid
)) {
1944 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0) {
1945 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error
);
1949 /* if we didn't get a filesec, use our local one */
1951 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
1954 KAUTH_DEBUG("SETATTR - updating existing filesec");
1957 facl
= &fsec
->fsec_acl
;
1959 /* if we're using the local filesec, we need to initialise it */
1960 if (fsec
== &lfsec
) {
1961 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
1962 fsec
->fsec_owner
= kauth_null_guid
;
1963 fsec
->fsec_group
= kauth_null_guid
;
1964 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
1965 facl
->acl_flags
= 0;
1969 * Update with the supplied attributes.
1971 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
1972 KAUTH_DEBUG("SETATTR - updating owner UUID");
1973 fsec
->fsec_owner
= vap
->va_uuuid
;
1974 VATTR_SET_SUPPORTED(vap
, va_uuuid
);
1976 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
1977 KAUTH_DEBUG("SETATTR - updating group UUID");
1978 fsec
->fsec_group
= vap
->va_guuid
;
1979 VATTR_SET_SUPPORTED(vap
, va_guuid
);
1981 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
1982 if (vap
->va_acl
== NULL
) {
1983 KAUTH_DEBUG("SETATTR - removing ACL");
1984 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
1986 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap
->va_acl
->acl_entrycount
);
1989 VATTR_SET_SUPPORTED(vap
, va_acl
);
1993 * If the filesec data is all invalid, we can just remove the EA completely.
1995 if ((facl
->acl_entrycount
== KAUTH_FILESEC_NOACL
) &&
1996 kauth_guid_equal(&fsec
->fsec_owner
, &kauth_null_guid
) &&
1997 kauth_guid_equal(&fsec
->fsec_group
, &kauth_null_guid
)) {
1998 error
= vn_removexattr(vp
, KAUTH_FILESEC_XATTR
, XATTR_NOSECURITY
, ctx
);
1999 /* no attribute is ok, nothing to delete */
2000 if (error
== ENOATTR
)
2002 VFS_DEBUG(ctx
, vp
, "SETATTR - remove filesec returning %d", error
);
2005 error
= vnode_set_filesec(vp
, fsec
, facl
, ctx
);
2006 VFS_DEBUG(ctx
, vp
, "SETATTR - update filesec returning %d", error
);
2009 /* if we fetched a filesec, dispose of the buffer */
2011 kauth_filesec_free(fsec
);
2019 * Definition of vnode operations.
2025 *#% lookup dvp L ? ?
2026 *#% lookup vpp - L -
2028 struct vnop_lookup_args
{
2029 struct vnodeop_desc
*a_desc
;
2032 struct componentname
*a_cnp
;
2033 vfs_context_t a_context
;
2038 VNOP_LOOKUP(vnode_t dvp
, vnode_t
*vpp
, struct componentname
*cnp
, vfs_context_t context
)
2041 struct vnop_lookup_args a
;
2044 int funnel_state
= 0;
2046 a
.a_desc
= &vnop_lookup_desc
;
2050 a
.a_context
= context
;
2051 thread_safe
= THREAD_SAFE_FS(dvp
);
2053 vnode_cache_credentials(dvp
, context
);
2056 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2060 _err
= (*dvp
->v_op
[vnop_lookup_desc
.vdesc_offset
])(&a
);
2065 if ( (cnp
->cn_flags
& ISLASTCN
) ) {
2066 if ( (cnp
->cn_flags
& LOCKPARENT
) ) {
2067 if ( !(cnp
->cn_flags
& FSNODELOCKHELD
) ) {
2069 * leave the fsnode lock held on
2070 * the directory, but restore the funnel...
2071 * also indicate that we need to drop the
2072 * fsnode_lock when we're done with the
2073 * system call processing for this path
2075 cnp
->cn_flags
|= FSNODELOCKHELD
;
2077 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2082 unlock_fsnode(dvp
, &funnel_state
);
2090 *#% create dvp L L L
2091 *#% create vpp - L -
2095 struct vnop_create_args
{
2096 struct vnodeop_desc
*a_desc
;
2099 struct componentname
*a_cnp
;
2100 struct vnode_attr
*a_vap
;
2101 vfs_context_t a_context
;
2105 VNOP_CREATE(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t context
)
2108 struct vnop_create_args a
;
2110 int funnel_state
= 0;
2112 a
.a_desc
= &vnop_create_desc
;
2117 a
.a_context
= context
;
2118 thread_safe
= THREAD_SAFE_FS(dvp
);
2121 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2125 _err
= (*dvp
->v_op
[vnop_create_desc
.vdesc_offset
])(&a
);
2126 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
2128 * Remove stale Apple Double file (if any).
2130 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
2133 unlock_fsnode(dvp
, &funnel_state
);
2141 *#% whiteout dvp L L L
2142 *#% whiteout cnp - - -
2143 *#% whiteout flag - - -
2146 struct vnop_whiteout_args
{
2147 struct vnodeop_desc
*a_desc
;
2149 struct componentname
*a_cnp
;
2151 vfs_context_t a_context
;
2155 VNOP_WHITEOUT(vnode_t dvp
, struct componentname
* cnp
, int flags
, vfs_context_t context
)
2158 struct vnop_whiteout_args a
;
2160 int funnel_state
= 0;
2162 a
.a_desc
= &vnop_whiteout_desc
;
2166 a
.a_context
= context
;
2167 thread_safe
= THREAD_SAFE_FS(dvp
);
2170 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2174 _err
= (*dvp
->v_op
[vnop_whiteout_desc
.vdesc_offset
])(&a
);
2176 unlock_fsnode(dvp
, &funnel_state
);
2188 struct vnop_mknod_args
{
2189 struct vnodeop_desc
*a_desc
;
2192 struct componentname
*a_cnp
;
2193 struct vnode_attr
*a_vap
;
2194 vfs_context_t a_context
;
2198 VNOP_MKNOD(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t context
)
2202 struct vnop_mknod_args a
;
2204 int funnel_state
= 0;
2206 a
.a_desc
= &vnop_mknod_desc
;
2211 a
.a_context
= context
;
2212 thread_safe
= THREAD_SAFE_FS(dvp
);
2215 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2219 _err
= (*dvp
->v_op
[vnop_mknod_desc
.vdesc_offset
])(&a
);
2221 unlock_fsnode(dvp
, &funnel_state
);
2232 struct vnop_open_args
{
2233 struct vnodeop_desc
*a_desc
;
2236 vfs_context_t a_context
;
2240 VNOP_OPEN(vnode_t vp
, int mode
, vfs_context_t context
)
2243 struct vnop_open_args a
;
2245 int funnel_state
= 0;
2246 struct vfs_context acontext
;
2248 if (context
== NULL
) {
2249 acontext
.vc_proc
= current_proc();
2250 acontext
.vc_ucred
= kauth_cred_get();
2251 context
= &acontext
;
2253 a
.a_desc
= &vnop_open_desc
;
2256 a
.a_context
= context
;
2257 thread_safe
= THREAD_SAFE_FS(vp
);
2260 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2261 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2262 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2263 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2268 _err
= (*vp
->v_op
[vnop_open_desc
.vdesc_offset
])(&a
);
2270 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2271 unlock_fsnode(vp
, NULL
);
2273 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2284 struct vnop_close_args
{
2285 struct vnodeop_desc
*a_desc
;
2288 vfs_context_t a_context
;
2292 VNOP_CLOSE(vnode_t vp
, int fflag
, vfs_context_t context
)
2295 struct vnop_close_args a
;
2297 int funnel_state
= 0;
2298 struct vfs_context acontext
;
2300 if (context
== NULL
) {
2301 acontext
.vc_proc
= current_proc();
2302 acontext
.vc_ucred
= kauth_cred_get();
2303 context
= &acontext
;
2305 a
.a_desc
= &vnop_close_desc
;
2308 a
.a_context
= context
;
2309 thread_safe
= THREAD_SAFE_FS(vp
);
2312 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2313 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2314 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2315 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2320 _err
= (*vp
->v_op
[vnop_close_desc
.vdesc_offset
])(&a
);
2322 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2323 unlock_fsnode(vp
, NULL
);
2325 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2336 struct vnop_access_args
{
2337 struct vnodeop_desc
*a_desc
;
2340 vfs_context_t a_context
;
2344 VNOP_ACCESS(vnode_t vp
, int action
, vfs_context_t context
)
2347 struct vnop_access_args a
;
2349 int funnel_state
= 0;
2350 struct vfs_context acontext
;
2352 if (context
== NULL
) {
2353 acontext
.vc_proc
= current_proc();
2354 acontext
.vc_ucred
= kauth_cred_get();
2355 context
= &acontext
;
2357 a
.a_desc
= &vnop_access_desc
;
2359 a
.a_action
= action
;
2360 a
.a_context
= context
;
2361 thread_safe
= THREAD_SAFE_FS(vp
);
2364 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2368 _err
= (*vp
->v_op
[vnop_access_desc
.vdesc_offset
])(&a
);
2370 unlock_fsnode(vp
, &funnel_state
);
2378 *#% getattr vp = = =
2381 struct vnop_getattr_args
{
2382 struct vnodeop_desc
*a_desc
;
2384 struct vnode_attr
*a_vap
;
2385 vfs_context_t a_context
;
2389 VNOP_GETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t context
)
2392 struct vnop_getattr_args a
;
2396 a
.a_desc
= &vnop_getattr_desc
;
2399 a
.a_context
= context
;
2400 thread_safe
= THREAD_SAFE_FS(vp
);
2403 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2407 _err
= (*vp
->v_op
[vnop_getattr_desc
.vdesc_offset
])(&a
);
2409 unlock_fsnode(vp
, &funnel_state
);
2417 *#% setattr vp L L L
2420 struct vnop_setattr_args
{
2421 struct vnodeop_desc
*a_desc
;
2423 struct vnode_attr
*a_vap
;
2424 vfs_context_t a_context
;
2428 VNOP_SETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t context
)
2431 struct vnop_setattr_args a
;
2435 a
.a_desc
= &vnop_setattr_desc
;
2438 a
.a_context
= context
;
2439 thread_safe
= THREAD_SAFE_FS(vp
);
2442 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2446 _err
= (*vp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
2449 * Shadow uid/gid/mod change to extended attibute file.
2451 if (_err
== 0 && !NATIVE_XATTR(vp
)) {
2452 struct vnode_attr va
;
2456 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2457 VATTR_SET(&va
, va_uid
, vap
->va_uid
);
2460 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2461 VATTR_SET(&va
, va_gid
, vap
->va_gid
);
2464 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
2465 VATTR_SET(&va
, va_mode
, vap
->va_mode
);
2472 dvp
= vnode_getparent(vp
);
2473 vname
= vnode_getname(vp
);
2475 xattrfile_setattr(dvp
, vname
, &va
, context
, thread_safe
);
2479 vnode_putname(vname
);
2483 unlock_fsnode(vp
, &funnel_state
);
2491 *#% getattrlist vp = = =
2494 struct vnop_getattrlist_args
{
2495 struct vnodeop_desc
*a_desc
;
2497 struct attrlist
*a_alist
;
2500 vfs_context_t a_context
;
2504 VNOP_GETATTRLIST(vnode_t vp
, struct attrlist
* alist
, struct uio
* uio
, int options
, vfs_context_t context
)
2507 struct vnop_getattrlist_args a
;
2509 int funnel_state
= 0;
2511 a
.a_desc
= &vnop_getattrlist_desc
;
2515 a
.a_options
= options
;
2516 a
.a_context
= context
;
2517 thread_safe
= THREAD_SAFE_FS(vp
);
2520 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2524 _err
= (*vp
->v_op
[vnop_getattrlist_desc
.vdesc_offset
])(&a
);
2526 unlock_fsnode(vp
, &funnel_state
);
2534 *#% setattrlist vp L L L
2537 struct vnop_setattrlist_args
{
2538 struct vnodeop_desc
*a_desc
;
2540 struct attrlist
*a_alist
;
2543 vfs_context_t a_context
;
2547 VNOP_SETATTRLIST(vnode_t vp
, struct attrlist
* alist
, struct uio
* uio
, int options
, vfs_context_t context
)
2550 struct vnop_setattrlist_args a
;
2552 int funnel_state
= 0;
2554 a
.a_desc
= &vnop_setattrlist_desc
;
2558 a
.a_options
= options
;
2559 a
.a_context
= context
;
2560 thread_safe
= THREAD_SAFE_FS(vp
);
2563 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2567 _err
= (*vp
->v_op
[vnop_setattrlist_desc
.vdesc_offset
])(&a
);
2569 vnode_uncache_credentials(vp
);
2572 unlock_fsnode(vp
, &funnel_state
);
2584 struct vnop_read_args
{
2585 struct vnodeop_desc
*a_desc
;
2589 vfs_context_t a_context
;
2593 VNOP_READ(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t context
)
2596 struct vnop_read_args a
;
2598 int funnel_state
= 0;
2599 struct vfs_context acontext
;
2601 if (context
== NULL
) {
2602 acontext
.vc_proc
= current_proc();
2603 acontext
.vc_ucred
= kauth_cred_get();
2604 context
= &acontext
;
2607 a
.a_desc
= &vnop_read_desc
;
2610 a
.a_ioflag
= ioflag
;
2611 a
.a_context
= context
;
2612 thread_safe
= THREAD_SAFE_FS(vp
);
2615 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2616 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2617 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2618 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2623 _err
= (*vp
->v_op
[vnop_read_desc
.vdesc_offset
])(&a
);
2626 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2627 unlock_fsnode(vp
, NULL
);
2629 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2641 struct vnop_write_args
{
2642 struct vnodeop_desc
*a_desc
;
2646 vfs_context_t a_context
;
2650 VNOP_WRITE(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t context
)
2652 struct vnop_write_args a
;
2655 int funnel_state
= 0;
2656 struct vfs_context acontext
;
2658 if (context
== NULL
) {
2659 acontext
.vc_proc
= current_proc();
2660 acontext
.vc_ucred
= kauth_cred_get();
2661 context
= &acontext
;
2664 a
.a_desc
= &vnop_write_desc
;
2667 a
.a_ioflag
= ioflag
;
2668 a
.a_context
= context
;
2669 thread_safe
= THREAD_SAFE_FS(vp
);
2672 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2673 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2674 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2675 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2680 _err
= (*vp
->v_op
[vnop_write_desc
.vdesc_offset
])(&a
);
2683 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2684 unlock_fsnode(vp
, NULL
);
2686 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2698 struct vnop_ioctl_args
{
2699 struct vnodeop_desc
*a_desc
;
2704 vfs_context_t a_context
;
2708 VNOP_IOCTL(vnode_t vp
, u_long command
, caddr_t data
, int fflag
, vfs_context_t context
)
2711 struct vnop_ioctl_args a
;
2713 int funnel_state
= 0;
2714 struct vfs_context acontext
;
2716 if (context
== NULL
) {
2717 acontext
.vc_proc
= current_proc();
2718 acontext
.vc_ucred
= kauth_cred_get();
2719 context
= &acontext
;
2722 if (vfs_context_is64bit(context
)) {
2723 if (!vnode_vfs64bitready(vp
)) {
2728 a
.a_desc
= &vnop_ioctl_desc
;
2730 a
.a_command
= command
;
2733 a
.a_context
= context
;
2734 thread_safe
= THREAD_SAFE_FS(vp
);
2737 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2738 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2739 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2740 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2745 _err
= (*vp
->v_op
[vnop_ioctl_desc
.vdesc_offset
])(&a
);
2747 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2748 unlock_fsnode(vp
, NULL
);
2750 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2762 struct vnop_select_args
{
2763 struct vnodeop_desc
*a_desc
;
2768 vfs_context_t a_context
;
2772 VNOP_SELECT(vnode_t vp
, int which
, int fflags
, void * wql
, vfs_context_t context
)
2775 struct vnop_select_args a
;
2777 int funnel_state
= 0;
2778 struct vfs_context acontext
;
2780 if (context
== NULL
) {
2781 acontext
.vc_proc
= current_proc();
2782 acontext
.vc_ucred
= kauth_cred_get();
2783 context
= &acontext
;
2785 a
.a_desc
= &vnop_select_desc
;
2788 a
.a_fflags
= fflags
;
2789 a
.a_context
= context
;
2791 thread_safe
= THREAD_SAFE_FS(vp
);
2794 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2795 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2796 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2797 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2802 _err
= (*vp
->v_op
[vnop_select_desc
.vdesc_offset
])(&a
);
2804 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2805 unlock_fsnode(vp
, NULL
);
2807 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2816 *#% exchange fvp L L L
2817 *#% exchange tvp L L L
2820 struct vnop_exchange_args
{
2821 struct vnodeop_desc
*a_desc
;
2825 vfs_context_t a_context
;
2829 VNOP_EXCHANGE(vnode_t fvp
, vnode_t tvp
, int options
, vfs_context_t context
)
2832 struct vnop_exchange_args a
;
2834 int funnel_state
= 0;
2835 vnode_t lock_first
= NULL
, lock_second
= NULL
;
2837 a
.a_desc
= &vnop_exchange_desc
;
2840 a
.a_options
= options
;
2841 a
.a_context
= context
;
2842 thread_safe
= THREAD_SAFE_FS(fvp
);
2846 * Lock in vnode address order to avoid deadlocks
2855 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) ) {
2858 if ( (_err
= lock_fsnode(lock_second
, NULL
)) ) {
2859 unlock_fsnode(lock_first
, &funnel_state
);
2863 _err
= (*fvp
->v_op
[vnop_exchange_desc
.vdesc_offset
])(&a
);
2865 unlock_fsnode(lock_second
, NULL
);
2866 unlock_fsnode(lock_first
, &funnel_state
);
2878 struct vnop_revoke_args
{
2879 struct vnodeop_desc
*a_desc
;
2882 vfs_context_t a_context
;
/*
 * KPI wrapper for the revoke vnode operation: builds a
 * vnop_revoke_args descriptor and dispatches through vp's v_op table.
 *
 * Unlike most VNOP_* wrappers in this file, no fsnode lock is taken:
 * for a non-thread-safe filesystem only the kernel funnel is acquired
 * around the call and restored afterwards.
 * NOTE(review): the a_vp/a_flags assignments, the thread_safe
 * conditional around the funnel calls, and the return of _err are
 * dropped from this extract — confirm against the full source.
 */
2886 VNOP_REVOKE(vnode_t vp
, int flags
, vfs_context_t context
)
2888 struct vnop_revoke_args a
;
2891 int funnel_state
= 0;
2893 a
.a_desc
= &vnop_revoke_desc
;
2896 a
.a_context
= context
;
2897 thread_safe
= THREAD_SAFE_FS(vp
);
/* grab the kernel funnel for non-thread-safe filesystems */
2900 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
/* dispatch to the filesystem's revoke entry point */
2902 _err
= (*vp
->v_op
[vnop_revoke_desc
.vdesc_offset
])(&a
);
/* restore the previous funnel state */
2904 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2916 struct vnop_mmap_args
{
2917 struct vnodeop_desc
*a_desc
;
2920 vfs_context_t a_context
;
/*
 * KPI wrapper for the mmap vnode operation: builds a vnop_mmap_args
 * descriptor (a_fflags = fflags) and dispatches through vp's v_op
 * table, bracketing the call with lock_fsnode()/unlock_fsnode() for
 * non-thread-safe filesystems.
 * NOTE(review): the a_vp assignment, the thread_safe conditionals
 * around the lock calls, the early-return on lock failure, and the
 * return of _err are dropped from this extract — confirm against the
 * full source.
 */
2924 VNOP_MMAP(vnode_t vp
, int fflags
, vfs_context_t context
)
2927 struct vnop_mmap_args a
;
2929 int funnel_state
= 0;
2931 a
.a_desc
= &vnop_mmap_desc
;
2933 a
.a_fflags
= fflags
;
2934 a
.a_context
= context
;
2935 thread_safe
= THREAD_SAFE_FS(vp
);
/* serialize against the fsnode lock; bail out if it cannot be taken */
2938 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
/* dispatch to the filesystem's mmap entry point */
2942 _err
= (*vp
->v_op
[vnop_mmap_desc
.vdesc_offset
])(&a
);
2944 unlock_fsnode(vp
, &funnel_state
);
2953 *# mnomap - vp U U U
2956 struct vnop_mnomap_args
{
2957 struct vnodeop_desc
*a_desc
;
2959 vfs_context_t a_context
;
/*
 * KPI wrapper for the mnomap vnode operation (notification that the
 * last mmap reference to vp is gone): builds a vnop_mnomap_args
 * descriptor and dispatches through vp's v_op table, bracketing the
 * call with lock_fsnode()/unlock_fsnode() for non-thread-safe
 * filesystems.
 * NOTE(review): the a_vp assignment, the thread_safe conditionals
 * around the lock calls, the early-return on lock failure, and the
 * return of _err are dropped from this extract — confirm against the
 * full source.
 */
2963 VNOP_MNOMAP(vnode_t vp
, vfs_context_t context
)
2966 struct vnop_mnomap_args a
;
2968 int funnel_state
= 0;
2970 a
.a_desc
= &vnop_mnomap_desc
;
2972 a
.a_context
= context
;
2973 thread_safe
= THREAD_SAFE_FS(vp
);
/* serialize against the fsnode lock; bail out if it cannot be taken */
2976 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
/* dispatch to the filesystem's mnomap entry point */
2980 _err
= (*vp
->v_op
[vnop_mnomap_desc
.vdesc_offset
])(&a
);
2982 unlock_fsnode(vp
, &funnel_state
);
2994 struct vnop_fsync_args
{
2995 struct vnodeop_desc
*a_desc
;
2998 vfs_context_t a_context
;
3002 VNOP_FSYNC(vnode_t vp
, int waitfor
, vfs_context_t context
)
3004 struct vnop_fsync_args a
;
3007 int funnel_state
= 0;
3009 a
.a_desc
= &vnop_fsync_desc
;
3011 a
.a_waitfor
= waitfor
;
3012 a
.a_context
= context
;
3013 thread_safe
= THREAD_SAFE_FS(vp
);
3016 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3020 _err
= (*vp
->v_op
[vnop_fsync_desc
.vdesc_offset
])(&a
);
3022 unlock_fsnode(vp
, &funnel_state
);
3031 *#% remove dvp L U U
3035 struct vnop_remove_args
{
3036 struct vnodeop_desc
*a_desc
;
3039 struct componentname
*a_cnp
;
3041 vfs_context_t a_context
;
3045 VNOP_REMOVE(vnode_t dvp
, vnode_t vp
, struct componentname
* cnp
, int flags
, vfs_context_t context
)
3048 struct vnop_remove_args a
;
3050 int funnel_state
= 0;
3052 a
.a_desc
= &vnop_remove_desc
;
3057 a
.a_context
= context
;
3058 thread_safe
= THREAD_SAFE_FS(dvp
);
3061 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3065 _err
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3068 vnode_setneedinactive(vp
);
3070 if ( !(NATIVE_XATTR(dvp
)) ) {
3072 * Remove any associated extended attibute file (._ AppleDouble file).
3074 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 1);
3078 unlock_fsnode(vp
, &funnel_state
);
3091 struct vnop_link_args
{
3092 struct vnodeop_desc
*a_desc
;
3095 struct componentname
*a_cnp
;
3096 vfs_context_t a_context
;
3100 VNOP_LINK(vnode_t vp
, vnode_t tdvp
, struct componentname
* cnp
, vfs_context_t context
)
3103 struct vnop_link_args a
;
3105 int funnel_state
= 0;
3108 * For file systems with non-native extended attributes,
3109 * disallow linking to an existing "._" Apple Double file.
3111 if ( !NATIVE_XATTR(tdvp
) && (vp
->v_type
== VREG
)) {
3114 vname
= vnode_getname(vp
);
3115 if (vname
!= NULL
) {
3117 if (vname
[0] == '.' && vname
[1] == '_' && vname
[2] != '\0') {
3120 vnode_putname(vname
);
3125 a
.a_desc
= &vnop_link_desc
;
3129 a
.a_context
= context
;
3130 thread_safe
= THREAD_SAFE_FS(vp
);
3133 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3137 _err
= (*tdvp
->v_op
[vnop_link_desc
.vdesc_offset
])(&a
);
3139 unlock_fsnode(vp
, &funnel_state
);
3148 *#% rename fdvp U U U
3149 *#% rename fvp U U U
3150 *#% rename tdvp L U U
3151 *#% rename tvp X U U
3154 struct vnop_rename_args
{
3155 struct vnodeop_desc
*a_desc
;
3158 struct componentname
*a_fcnp
;
3161 struct componentname
*a_tcnp
;
3162 vfs_context_t a_context
;
3166 VNOP_RENAME(struct vnode
*fdvp
, struct vnode
*fvp
, struct componentname
*fcnp
,
3167 struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
3168 vfs_context_t context
)
3171 struct vnop_rename_args a
;
3172 int funnel_state
= 0;
3173 char smallname1
[48];
3174 char smallname2
[48];
3175 char *xfromname
= NULL
;
3176 char *xtoname
= NULL
;
3177 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3178 vnode_t fdvp_unsafe
= NULLVP
;
3179 vnode_t tdvp_unsafe
= NULLVP
;
3181 a
.a_desc
= &vnop_rename_desc
;
3188 a
.a_context
= context
;
3190 if (!THREAD_SAFE_FS(fdvp
))
3192 if (!THREAD_SAFE_FS(tdvp
))
3195 if (fdvp_unsafe
!= NULLVP
) {
3197 * Lock parents in vnode address order to avoid deadlocks
3198 * note that it's possible for the fdvp to be unsafe,
3199 * but the tdvp to be safe because tvp could be a directory
3200 * in the root of a filesystem... in that case, tdvp is the
3201 * in the filesystem that this root is mounted on
3203 if (tdvp_unsafe
== NULL
|| fdvp_unsafe
== tdvp_unsafe
) {
3204 lock_first
= fdvp_unsafe
;
3206 } else if (fdvp_unsafe
< tdvp_unsafe
) {
3207 lock_first
= fdvp_unsafe
;
3208 lock_second
= tdvp_unsafe
;
3210 lock_first
= tdvp_unsafe
;
3211 lock_second
= fdvp_unsafe
;
3213 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) )
3216 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3217 unlock_fsnode(lock_first
, &funnel_state
);
3222 * Lock both children in vnode address order to avoid deadlocks
3224 if (tvp
== NULL
|| tvp
== fvp
) {
3227 } else if (fvp
< tvp
) {
3234 if ( (_err
= lock_fsnode(lock_first
, NULL
)) )
3237 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3238 unlock_fsnode(lock_first
, NULL
);
3243 * Save source and destination names (._ AppleDouble files).
3244 * Skip if source already has a "._" prefix.
3246 if (!NATIVE_XATTR(fdvp
) &&
3247 !(fcnp
->cn_nameptr
[0] == '.' && fcnp
->cn_nameptr
[1] == '_')) {
3250 /* Get source attribute file name. */
3251 len
= fcnp
->cn_namelen
+ 3;
3252 if (len
> sizeof(smallname1
)) {
3253 MALLOC(xfromname
, char *, len
, M_TEMP
, M_WAITOK
);
3255 xfromname
= &smallname1
[0];
3257 strcpy(xfromname
, "._");
3258 strncat(xfromname
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
);
3259 xfromname
[len
-1] = '\0';
3261 /* Get destination attribute file name. */
3262 len
= tcnp
->cn_namelen
+ 3;
3263 if (len
> sizeof(smallname2
)) {
3264 MALLOC(xtoname
, char *, len
, M_TEMP
, M_WAITOK
);
3266 xtoname
= &smallname2
[0];
3268 strcpy(xtoname
, "._");
3269 strncat(xtoname
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
);
3270 xtoname
[len
-1] = '\0';
3273 _err
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3275 if (fdvp_unsafe
!= NULLVP
) {
3276 if (lock_second
!= NULL
)
3277 unlock_fsnode(lock_second
, NULL
);
3278 unlock_fsnode(lock_first
, NULL
);
3281 if (tvp
&& tvp
!= fvp
)
3282 vnode_setneedinactive(tvp
);
3286 * Rename any associated extended attibute file (._ AppleDouble file).
3288 if (_err
== 0 && !NATIVE_XATTR(fdvp
) && xfromname
!= NULL
) {
3289 struct nameidata fromnd
, tond
;
3294 * Get source attribute file vnode.
3295 * Note that fdvp already has an iocount reference and
3296 * using DELETE will take an additional reference.
3298 NDINIT(&fromnd
, DELETE
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3299 CAST_USER_ADDR_T(xfromname
), context
);
3300 fromnd
.ni_dvp
= fdvp
;
3301 error
= namei(&fromnd
);
3304 /* When source doesn't exist there still may be a destination. */
3305 if (error
== ENOENT
) {
3310 } else if (fromnd
.ni_vp
->v_type
!= VREG
) {
3311 vnode_put(fromnd
.ni_vp
);
3316 struct vnop_remove_args args
;
3319 * Get destination attribute file vnode.
3320 * Note that tdvp already has an iocount reference.
3322 NDINIT(&tond
, DELETE
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3323 CAST_USER_ADDR_T(xtoname
), context
);
3325 error
= namei(&tond
);
3329 if (tond
.ni_vp
->v_type
!= VREG
) {
3330 vnode_put(tond
.ni_vp
);
3334 args
.a_desc
= &vnop_remove_desc
;
3336 args
.a_vp
= tond
.ni_vp
;
3337 args
.a_cnp
= &tond
.ni_cnd
;
3338 args
.a_context
= context
;
3340 if (fdvp_unsafe
!= NULLVP
)
3341 error
= lock_fsnode(tond
.ni_vp
, NULL
);
3343 error
= (*tdvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&args
);
3345 if (fdvp_unsafe
!= NULLVP
)
3346 unlock_fsnode(tond
.ni_vp
, NULL
);
3349 vnode_setneedinactive(tond
.ni_vp
);
3351 vnode_put(tond
.ni_vp
);
3357 * Get destination attribute file vnode.
3359 NDINIT(&tond
, RENAME
,
3360 NOCACHE
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3361 CAST_USER_ADDR_T(xtoname
), context
);
3363 error
= namei(&tond
);
3366 vnode_put(fromnd
.ni_vp
);
3370 a
.a_desc
= &vnop_rename_desc
;
3372 a
.a_fvp
= fromnd
.ni_vp
;
3373 a
.a_fcnp
= &fromnd
.ni_cnd
;
3375 a
.a_tvp
= tond
.ni_vp
;
3376 a
.a_tcnp
= &tond
.ni_cnd
;
3377 a
.a_context
= context
;
3379 if (fdvp_unsafe
!= NULLVP
) {
3381 * Lock in vnode address order to avoid deadlocks
3383 if (tond
.ni_vp
== NULL
|| tond
.ni_vp
== fromnd
.ni_vp
) {
3384 lock_first
= fromnd
.ni_vp
;
3386 } else if (fromnd
.ni_vp
< tond
.ni_vp
) {
3387 lock_first
= fromnd
.ni_vp
;
3388 lock_second
= tond
.ni_vp
;
3390 lock_first
= tond
.ni_vp
;
3391 lock_second
= fromnd
.ni_vp
;
3393 if ( (error
= lock_fsnode(lock_first
, NULL
)) == 0) {
3394 if (lock_second
!= NULL
&& (error
= lock_fsnode(lock_second
, NULL
)) )
3395 unlock_fsnode(lock_first
, NULL
);
3399 error
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3401 if (fdvp_unsafe
!= NULLVP
) {
3402 if (lock_second
!= NULL
)
3403 unlock_fsnode(lock_second
, NULL
);
3404 unlock_fsnode(lock_first
, NULL
);
3407 vnode_setneedinactive(fromnd
.ni_vp
);
3409 if (tond
.ni_vp
&& tond
.ni_vp
!= fromnd
.ni_vp
)
3410 vnode_setneedinactive(tond
.ni_vp
);
3413 vnode_put(fromnd
.ni_vp
);
3415 vnode_put(tond
.ni_vp
);
3421 if (xfromname
&& xfromname
!= &smallname1
[0]) {
3422 FREE(xfromname
, M_TEMP
);
3424 if (xtoname
&& xtoname
!= &smallname2
[0]) {
3425 FREE(xtoname
, M_TEMP
);
3428 if (fdvp_unsafe
!= NULLVP
) {
3429 if (tdvp_unsafe
!= NULLVP
)
3430 unlock_fsnode(tdvp_unsafe
, NULL
);
3431 unlock_fsnode(fdvp_unsafe
, &funnel_state
);
3443 struct vnop_mkdir_args
{
3444 struct vnodeop_desc
*a_desc
;
3447 struct componentname
*a_cnp
;
3448 struct vnode_attr
*a_vap
;
3449 vfs_context_t a_context
;
3453 VNOP_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3454 struct vnode_attr
*vap
, vfs_context_t context
)
3457 struct vnop_mkdir_args a
;
3459 int funnel_state
= 0;
3461 a
.a_desc
= &vnop_mkdir_desc
;
3466 a
.a_context
= context
;
3467 thread_safe
= THREAD_SAFE_FS(dvp
);
3470 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3474 _err
= (*dvp
->v_op
[vnop_mkdir_desc
.vdesc_offset
])(&a
);
3475 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3477 * Remove stale Apple Double file (if any).
3479 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
3482 unlock_fsnode(dvp
, &funnel_state
);
3495 struct vnop_rmdir_args
{
3496 struct vnodeop_desc
*a_desc
;
3499 struct componentname
*a_cnp
;
3500 vfs_context_t a_context
;
3505 VNOP_RMDIR(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
, vfs_context_t context
)
3508 struct vnop_rmdir_args a
;
3510 int funnel_state
= 0;
3512 a
.a_desc
= &vnop_rmdir_desc
;
3516 a
.a_context
= context
;
3517 thread_safe
= THREAD_SAFE_FS(dvp
);
3520 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3524 _err
= (*vp
->v_op
[vnop_rmdir_desc
.vdesc_offset
])(&a
);
3527 vnode_setneedinactive(vp
);
3529 if ( !(NATIVE_XATTR(dvp
)) ) {
3531 * Remove any associated extended attibute file (._ AppleDouble file).
3533 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 1);
3537 unlock_fsnode(vp
, &funnel_state
);
3543 * Remove a ._ AppleDouble file
3545 #define AD_STALE_SECS (180)
3547 xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t context
, int thread_safe
, int force
) {
3549 struct nameidata nd
;
3551 char *filename
= NULL
;
3554 if ((basename
== NULL
) || (basename
[0] == '\0') ||
3555 (basename
[0] == '.' && basename
[1] == '_')) {
3558 filename
= &smallname
[0];
3559 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
3560 if (len
>= sizeof(smallname
)) {
3561 len
++; /* snprintf result doesn't include '\0' */
3562 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
3563 len
= snprintf(filename
, len
, "._%s", basename
);
3565 NDINIT(&nd
, DELETE
, LOCKLEAF
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3566 CAST_USER_ADDR_T(filename
), context
);
3568 if (namei(&nd
) != 0)
3573 if (xvp
->v_type
!= VREG
)
3577 * When creating a new object and a "._" file already
3578 * exists, check to see if its a stale "._" file.
3582 struct vnode_attr va
;
3585 VATTR_WANTED(&va
, va_data_size
);
3586 VATTR_WANTED(&va
, va_modify_time
);
3587 if (VNOP_GETATTR(xvp
, &va
, context
) == 0 &&
3588 VATTR_IS_SUPPORTED(&va
, va_data_size
) &&
3589 VATTR_IS_SUPPORTED(&va
, va_modify_time
) &&
3590 va
.va_data_size
!= 0) {
3594 if ((tv
.tv_sec
> va
.va_modify_time
.tv_sec
) &&
3595 (tv
.tv_sec
- va
.va_modify_time
.tv_sec
) > AD_STALE_SECS
) {
3596 force
= 1; /* must be stale */
3601 struct vnop_remove_args a
;
3604 a
.a_desc
= &vnop_remove_desc
;
3605 a
.a_dvp
= nd
.ni_dvp
;
3607 a
.a_cnp
= &nd
.ni_cnd
;
3608 a
.a_context
= context
;
3611 if ( (lock_fsnode(xvp
, NULL
)) )
3614 error
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3617 unlock_fsnode(xvp
, NULL
);
3620 vnode_setneedinactive(xvp
);
3623 /* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
3626 if (filename
&& filename
!= &smallname
[0]) {
3627 FREE(filename
, M_TEMP
);
3632 * Shadow uid/gid/mod to a ._ AppleDouble file
3635 xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
3636 vfs_context_t context
, int thread_safe
) {
3638 struct nameidata nd
;
3640 char *filename
= NULL
;
3643 if ((dvp
== NULLVP
) ||
3644 (basename
== NULL
) || (basename
[0] == '\0') ||
3645 (basename
[0] == '.' && basename
[1] == '_')) {
3648 filename
= &smallname
[0];
3649 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
3650 if (len
>= sizeof(smallname
)) {
3651 len
++; /* snprintf result doesn't include '\0' */
3652 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
3653 len
= snprintf(filename
, len
, "._%s", basename
);
3655 NDINIT(&nd
, LOOKUP
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3656 CAST_USER_ADDR_T(filename
), context
);
3658 if (namei(&nd
) != 0)
3664 if (xvp
->v_type
== VREG
) {
3665 struct vnop_setattr_args a
;
3667 a
.a_desc
= &vnop_setattr_desc
;
3670 a
.a_context
= context
;
3673 if ( (lock_fsnode(xvp
, NULL
)) )
3676 (void) (*xvp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
3678 unlock_fsnode(xvp
, NULL
);
3684 if (filename
&& filename
!= &smallname
[0]) {
3685 FREE(filename
, M_TEMP
);
3692 *#% symlink dvp L U U
3693 *#% symlink vpp - U -
3696 struct vnop_symlink_args
{
3697 struct vnodeop_desc
*a_desc
;
3700 struct componentname
*a_cnp
;
3701 struct vnode_attr
*a_vap
;
3703 vfs_context_t a_context
;
3708 VNOP_SYMLINK(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3709 struct vnode_attr
*vap
, char *target
, vfs_context_t context
)
3712 struct vnop_symlink_args a
;
3714 int funnel_state
= 0;
3716 a
.a_desc
= &vnop_symlink_desc
;
3721 a
.a_target
= target
;
3722 a
.a_context
= context
;
3723 thread_safe
= THREAD_SAFE_FS(dvp
);
3726 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3730 _err
= (*dvp
->v_op
[vnop_symlink_desc
.vdesc_offset
])(&a
);
3731 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3733 * Remove stale Apple Double file (if any).
3735 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
3738 unlock_fsnode(dvp
, &funnel_state
);
3746 *#% readdir vp L L L
3749 struct vnop_readdir_args
{
3750 struct vnodeop_desc
*a_desc
;
3756 vfs_context_t a_context
;
3761 VNOP_READDIR(struct vnode
*vp
, struct uio
*uio
, int flags
, int *eofflag
,
3762 int *numdirent
, vfs_context_t context
)
3765 struct vnop_readdir_args a
;
3767 int funnel_state
= 0;
3769 a
.a_desc
= &vnop_readdir_desc
;
3773 a
.a_eofflag
= eofflag
;
3774 a
.a_numdirent
= numdirent
;
3775 a
.a_context
= context
;
3776 thread_safe
= THREAD_SAFE_FS(vp
);
3779 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3783 _err
= (*vp
->v_op
[vnop_readdir_desc
.vdesc_offset
])(&a
);
3785 unlock_fsnode(vp
, &funnel_state
);
3793 *#% readdirattr vp L L L
3796 struct vnop_readdirattr_args
{
3797 struct vnodeop_desc
*a_desc
;
3799 struct attrlist
*a_alist
;
3805 u_long
*a_actualcount
;
3806 vfs_context_t a_context
;
3811 VNOP_READDIRATTR(struct vnode
*vp
, struct attrlist
*alist
, struct uio
*uio
, u_long maxcount
,
3812 u_long options
, u_long
*newstate
, int *eofflag
, u_long
*actualcount
, vfs_context_t context
)
3815 struct vnop_readdirattr_args a
;
3817 int funnel_state
= 0;
3819 a
.a_desc
= &vnop_readdirattr_desc
;
3823 a
.a_maxcount
= maxcount
;
3824 a
.a_options
= options
;
3825 a
.a_newstate
= newstate
;
3826 a
.a_eofflag
= eofflag
;
3827 a
.a_actualcount
= actualcount
;
3828 a
.a_context
= context
;
3829 thread_safe
= THREAD_SAFE_FS(vp
);
3832 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3836 _err
= (*vp
->v_op
[vnop_readdirattr_desc
.vdesc_offset
])(&a
);
3838 unlock_fsnode(vp
, &funnel_state
);
3846 *#% readlink vp L L L
3849 struct vnop_readlink_args
{
3850 struct vnodeop_desc
*a_desc
;
3853 vfs_context_t a_context
;
3858 VNOP_READLINK(struct vnode
*vp
, struct uio
*uio
, vfs_context_t context
)
3861 struct vnop_readlink_args a
;
3863 int funnel_state
= 0;
3865 a
.a_desc
= &vnop_readlink_desc
;
3868 a
.a_context
= context
;
3869 thread_safe
= THREAD_SAFE_FS(vp
);
3872 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3876 _err
= (*vp
->v_op
[vnop_readlink_desc
.vdesc_offset
])(&a
);
3878 unlock_fsnode(vp
, &funnel_state
);
3886 *#% inactive vp L U U
3889 struct vnop_inactive_args
{
3890 struct vnodeop_desc
*a_desc
;
3892 vfs_context_t a_context
;
3896 VNOP_INACTIVE(struct vnode
*vp
, vfs_context_t context
)
3899 struct vnop_inactive_args a
;
3901 int funnel_state
= 0;
3903 a
.a_desc
= &vnop_inactive_desc
;
3905 a
.a_context
= context
;
3906 thread_safe
= THREAD_SAFE_FS(vp
);
3909 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3913 _err
= (*vp
->v_op
[vnop_inactive_desc
.vdesc_offset
])(&a
);
3915 unlock_fsnode(vp
, &funnel_state
);
3924 *#% reclaim vp U U U
3927 struct vnop_reclaim_args
{
3928 struct vnodeop_desc
*a_desc
;
3930 vfs_context_t a_context
;
3934 VNOP_RECLAIM(struct vnode
*vp
, vfs_context_t context
)
3937 struct vnop_reclaim_args a
;
3939 int funnel_state
= 0;
3941 a
.a_desc
= &vnop_reclaim_desc
;
3943 a
.a_context
= context
;
3944 thread_safe
= THREAD_SAFE_FS(vp
);
3947 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3949 _err
= (*vp
->v_op
[vnop_reclaim_desc
.vdesc_offset
])(&a
);
3951 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3960 *#% pathconf vp L L L
3963 struct vnop_pathconf_args
{
3964 struct vnodeop_desc
*a_desc
;
3967 register_t
*a_retval
;
3968 vfs_context_t a_context
;
3972 VNOP_PATHCONF(struct vnode
*vp
, int name
, register_t
*retval
, vfs_context_t context
)
3975 struct vnop_pathconf_args a
;
3977 int funnel_state
= 0;
3979 a
.a_desc
= &vnop_pathconf_desc
;
3982 a
.a_retval
= retval
;
3983 a
.a_context
= context
;
3984 thread_safe
= THREAD_SAFE_FS(vp
);
3987 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3991 _err
= (*vp
->v_op
[vnop_pathconf_desc
.vdesc_offset
])(&a
);
3993 unlock_fsnode(vp
, &funnel_state
);
4001 *#% advlock vp U U U
4004 struct vnop_advlock_args
{
4005 struct vnodeop_desc
*a_desc
;
4011 vfs_context_t a_context
;
4015 VNOP_ADVLOCK(struct vnode
*vp
, caddr_t id
, int op
, struct flock
*fl
, int flags
, vfs_context_t context
)
4018 struct vnop_advlock_args a
;
4020 int funnel_state
= 0;
4021 struct uthread
* uth
;
4023 a
.a_desc
= &vnop_advlock_desc
;
4029 a
.a_context
= context
;
4030 thread_safe
= THREAD_SAFE_FS(vp
);
4032 uth
= get_bsdthread_info(current_thread());
4034 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4036 /* Disallow advisory locking on non-seekable vnodes */
4037 if (vnode_isfifo(vp
)) {
4038 _err
= err_advlock(&a
);
4040 if ((vp
->v_flag
& VLOCKLOCAL
)) {
4041 /* Advisory locking done at this layer */
4042 _err
= lf_advlock(&a
);
4044 /* Advisory locking done by underlying filesystem */
4045 _err
= (*vp
->v_op
[vnop_advlock_desc
.vdesc_offset
])(&a
);
4049 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4059 *#% allocate vp L L L
4062 struct vnop_allocate_args
{
4063 struct vnodeop_desc
*a_desc
;
4067 off_t
*a_bytesallocated
;
4069 vfs_context_t a_context
;
4074 VNOP_ALLOCATE(struct vnode
*vp
, off_t length
, u_int32_t flags
, off_t
*bytesallocated
, off_t offset
, vfs_context_t context
)
4077 struct vnop_allocate_args a
;
4079 int funnel_state
= 0;
4081 a
.a_desc
= &vnop_allocate_desc
;
4083 a
.a_length
= length
;
4085 a
.a_bytesallocated
= bytesallocated
;
4086 a
.a_offset
= offset
;
4087 a
.a_context
= context
;
4088 thread_safe
= THREAD_SAFE_FS(vp
);
4091 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4095 _err
= (*vp
->v_op
[vnop_allocate_desc
.vdesc_offset
])(&a
);
4097 unlock_fsnode(vp
, &funnel_state
);
4108 struct vnop_pagein_args
{
4109 struct vnodeop_desc
*a_desc
;
4112 vm_offset_t a_pl_offset
;
4116 vfs_context_t a_context
;
4120 VNOP_PAGEIN(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t context
)
4123 struct vnop_pagein_args a
;
4125 int funnel_state
= 0;
4127 a
.a_desc
= &vnop_pagein_desc
;
4130 a
.a_pl_offset
= pl_offset
;
4131 a
.a_f_offset
= f_offset
;
4134 a
.a_context
= context
;
4135 thread_safe
= THREAD_SAFE_FS(vp
);
4138 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4140 _err
= (*vp
->v_op
[vnop_pagein_desc
.vdesc_offset
])(&a
);
4142 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4150 *#% pageout vp = = =
4153 struct vnop_pageout_args
{
4154 struct vnodeop_desc
*a_desc
;
4157 vm_offset_t a_pl_offset
;
4161 vfs_context_t a_context
;
4166 VNOP_PAGEOUT(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t context
)
4169 struct vnop_pageout_args a
;
4171 int funnel_state
= 0;
4173 a
.a_desc
= &vnop_pageout_desc
;
4176 a
.a_pl_offset
= pl_offset
;
4177 a
.a_f_offset
= f_offset
;
4180 a
.a_context
= context
;
4181 thread_safe
= THREAD_SAFE_FS(vp
);
4184 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4186 _err
= (*vp
->v_op
[vnop_pageout_desc
.vdesc_offset
])(&a
);
4188 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4197 *#% searchfs vp L L L
4200 struct vnop_searchfs_args
{
4201 struct vnodeop_desc
*a_desc
;
4203 void *a_searchparams1
;
4204 void *a_searchparams2
;
4205 struct attrlist
*a_searchattrs
;
4206 u_long a_maxmatches
;
4207 struct timeval
*a_timelimit
;
4208 struct attrlist
*a_returnattrs
;
4209 u_long
*a_nummatches
;
4210 u_long a_scriptcode
;
4213 struct searchstate
*a_searchstate
;
4214 vfs_context_t a_context
;
4219 VNOP_SEARCHFS(struct vnode
*vp
, void *searchparams1
, void *searchparams2
, struct attrlist
*searchattrs
, u_long maxmatches
, struct timeval
*timelimit
, struct attrlist
*returnattrs
, u_long
*nummatches
, u_long scriptcode
, u_long options
, struct uio
*uio
, struct searchstate
*searchstate
, vfs_context_t context
)
4222 struct vnop_searchfs_args a
;
4224 int funnel_state
= 0;
4226 a
.a_desc
= &vnop_searchfs_desc
;
4228 a
.a_searchparams1
= searchparams1
;
4229 a
.a_searchparams2
= searchparams2
;
4230 a
.a_searchattrs
= searchattrs
;
4231 a
.a_maxmatches
= maxmatches
;
4232 a
.a_timelimit
= timelimit
;
4233 a
.a_returnattrs
= returnattrs
;
4234 a
.a_nummatches
= nummatches
;
4235 a
.a_scriptcode
= scriptcode
;
4236 a
.a_options
= options
;
4238 a
.a_searchstate
= searchstate
;
4239 a
.a_context
= context
;
4240 thread_safe
= THREAD_SAFE_FS(vp
);
4243 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4247 _err
= (*vp
->v_op
[vnop_searchfs_desc
.vdesc_offset
])(&a
);
4249 unlock_fsnode(vp
, &funnel_state
);
4257 *#% copyfile fvp U U U
4258 *#% copyfile tdvp L U U
4259 *#% copyfile tvp X U U
4262 struct vnop_copyfile_args
{
4263 struct vnodeop_desc
*a_desc
;
4267 struct componentname
*a_tcnp
;
4270 vfs_context_t a_context
;
4274 VNOP_COPYFILE(struct vnode
*fvp
, struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
4275 int mode
, int flags
, vfs_context_t context
)
4278 struct vnop_copyfile_args a
;
4279 a
.a_desc
= &vnop_copyfile_desc
;
4286 a
.a_context
= context
;
4287 _err
= (*fvp
->v_op
[vnop_copyfile_desc
.vdesc_offset
])(&a
);
4293 VNOP_GETXATTR(vnode_t vp
, const char *name
, uio_t uio
, size_t *size
, int options
, vfs_context_t context
)
4295 struct vnop_getxattr_args a
;
4298 int funnel_state
= 0;
4300 a
.a_desc
= &vnop_getxattr_desc
;
4305 a
.a_options
= options
;
4306 a
.a_context
= context
;
4308 thread_safe
= THREAD_SAFE_FS(vp
);
4310 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4314 error
= (*vp
->v_op
[vnop_getxattr_desc
.vdesc_offset
])(&a
);
4316 unlock_fsnode(vp
, &funnel_state
);
4322 VNOP_SETXATTR(vnode_t vp
, const char *name
, uio_t uio
, int options
, vfs_context_t context
)
4324 struct vnop_setxattr_args a
;
4327 int funnel_state
= 0;
4329 a
.a_desc
= &vnop_setxattr_desc
;
4333 a
.a_options
= options
;
4334 a
.a_context
= context
;
4336 thread_safe
= THREAD_SAFE_FS(vp
);
4338 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4342 error
= (*vp
->v_op
[vnop_setxattr_desc
.vdesc_offset
])(&a
);
4344 unlock_fsnode(vp
, &funnel_state
);
4350 VNOP_REMOVEXATTR(vnode_t vp
, const char *name
, int options
, vfs_context_t context
)
4352 struct vnop_removexattr_args a
;
4355 int funnel_state
= 0;
4357 a
.a_desc
= &vnop_removexattr_desc
;
4360 a
.a_options
= options
;
4361 a
.a_context
= context
;
4363 thread_safe
= THREAD_SAFE_FS(vp
);
4365 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4369 error
= (*vp
->v_op
[vnop_removexattr_desc
.vdesc_offset
])(&a
);
4371 unlock_fsnode(vp
, &funnel_state
);
4377 VNOP_LISTXATTR(vnode_t vp
, uio_t uio
, size_t *size
, int options
, vfs_context_t context
)
4379 struct vnop_listxattr_args a
;
4382 int funnel_state
= 0;
4384 a
.a_desc
= &vnop_listxattr_desc
;
4388 a
.a_options
= options
;
4389 a
.a_context
= context
;
4391 thread_safe
= THREAD_SAFE_FS(vp
);
4393 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4397 error
= (*vp
->v_op
[vnop_listxattr_desc
.vdesc_offset
])(&a
);
4399 unlock_fsnode(vp
, &funnel_state
);
4408 *#% blktooff vp = = =
4411 struct vnop_blktooff_args
{
4412 struct vnodeop_desc
*a_desc
;
4419 VNOP_BLKTOOFF(struct vnode
*vp
, daddr64_t lblkno
, off_t
*offset
)
4422 struct vnop_blktooff_args a
;
4424 int funnel_state
= 0;
4426 a
.a_desc
= &vnop_blktooff_desc
;
4428 a
.a_lblkno
= lblkno
;
4429 a
.a_offset
= offset
;
4430 thread_safe
= THREAD_SAFE_FS(vp
);
4433 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4435 _err
= (*vp
->v_op
[vnop_blktooff_desc
.vdesc_offset
])(&a
);
4437 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4445 *#% offtoblk vp = = =
4448 struct vnop_offtoblk_args
{
4449 struct vnodeop_desc
*a_desc
;
4452 daddr64_t
*a_lblkno
;
4456 VNOP_OFFTOBLK(struct vnode
*vp
, off_t offset
, daddr64_t
*lblkno
)
4459 struct vnop_offtoblk_args a
;
4461 int funnel_state
= 0;
4463 a
.a_desc
= &vnop_offtoblk_desc
;
4465 a
.a_offset
= offset
;
4466 a
.a_lblkno
= lblkno
;
4467 thread_safe
= THREAD_SAFE_FS(vp
);
4470 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4472 _err
= (*vp
->v_op
[vnop_offtoblk_desc
.vdesc_offset
])(&a
);
4474 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4482 *#% blockmap vp L L L
4485 struct vnop_blockmap_args
{
4486 struct vnodeop_desc
*a_desc
;
4494 vfs_context_t a_context
;
4498 VNOP_BLOCKMAP(struct vnode
*vp
, off_t foffset
, size_t size
, daddr64_t
*bpn
, size_t *run
, void *poff
, int flags
, vfs_context_t context
)
4501 struct vnop_blockmap_args a
;
4503 int funnel_state
= 0;
4504 struct vfs_context acontext
;
4506 if (context
== NULL
) {
4507 acontext
.vc_proc
= current_proc();
4508 acontext
.vc_ucred
= kauth_cred_get();
4509 context
= &acontext
;
4511 a
.a_desc
= &vnop_blockmap_desc
;
4513 a
.a_foffset
= foffset
;
4519 a
.a_context
= context
;
4520 thread_safe
= THREAD_SAFE_FS(vp
);
4523 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4525 _err
= (*vp
->v_op
[vnop_blockmap_desc
.vdesc_offset
])(&a
);
4527 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4533 struct vnop_strategy_args
{
4534 struct vnodeop_desc
*a_desc
;
4540 VNOP_STRATEGY(struct buf
*bp
)
4543 struct vnop_strategy_args a
;
4544 a
.a_desc
= &vnop_strategy_desc
;
4546 _err
= (*buf_vnode(bp
)->v_op
[vnop_strategy_desc
.vdesc_offset
])(&a
);
4551 struct vnop_bwrite_args
{
4552 struct vnodeop_desc
*a_desc
;
4557 VNOP_BWRITE(struct buf
*bp
)
4560 struct vnop_bwrite_args a
;
4561 a
.a_desc
= &vnop_bwrite_desc
;
4563 _err
= (*buf_vnode(bp
)->v_op
[vnop_bwrite_desc
.vdesc_offset
])(&a
);
4568 struct vnop_kqfilt_add_args
{
4569 struct vnodeop_desc
*a_desc
;
4572 vfs_context_t a_context
;
4576 VNOP_KQFILT_ADD(struct vnode
*vp
, struct knote
*kn
, vfs_context_t context
)
4579 struct vnop_kqfilt_add_args a
;
4581 int funnel_state
= 0;
4583 a
.a_desc
= VDESC(vnop_kqfilt_add
);
4586 a
.a_context
= context
;
4587 thread_safe
= THREAD_SAFE_FS(vp
);
4590 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4594 _err
= (*vp
->v_op
[vnop_kqfilt_add_desc
.vdesc_offset
])(&a
);
4596 unlock_fsnode(vp
, &funnel_state
);
4602 struct vnop_kqfilt_remove_args
{
4603 struct vnodeop_desc
*a_desc
;
4606 vfs_context_t a_context
;
4610 VNOP_KQFILT_REMOVE(struct vnode
*vp
, uintptr_t ident
, vfs_context_t context
)
4613 struct vnop_kqfilt_remove_args a
;
4615 int funnel_state
= 0;
4617 a
.a_desc
= VDESC(vnop_kqfilt_remove
);
4620 a
.a_context
= context
;
4621 thread_safe
= THREAD_SAFE_FS(vp
);
4624 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4628 _err
= (*vp
->v_op
[vnop_kqfilt_remove_desc
.vdesc_offset
])(&a
);
4630 unlock_fsnode(vp
, &funnel_state
);