2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
70 * External virtual filesystem routines
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/proc_internal.h>
79 #include <sys/kauth.h>
80 #include <sys/mount.h>
81 #include <sys/mount_internal.h>
83 #include <sys/vnode_internal.h>
85 #include <sys/namei.h>
86 #include <sys/ucred.h>
88 #include <sys/errno.h>
89 #include <sys/malloc.h>
90 #include <sys/domain.h>
92 #include <sys/syslog.h>
95 #include <sys/sysctl.h>
96 #include <sys/filedesc.h>
97 #include <sys/fsevents.h>
99 #include <sys/lockf.h>
100 #include <sys/xattr.h>
102 #include <kern/assert.h>
103 #include <kern/kalloc.h>
105 #include <libkern/OSByteOrder.h>
107 #include <miscfs/specfs/specdev.h>
109 #include <mach/mach_types.h>
110 #include <mach/memory_object_types.h>
/*
 * THREAD_SAFE_FS: nonzero iff the vnode's filesystem is thread safe
 * (no v_unsafefs funnel state attached to the vnode).
 */
#define THREAD_SAFE_FS(VP)	\
	((VP)->v_unsafefs ? 0 : 1)

/*
 * NATIVE_XATTR: nonzero iff the vnode's mounted filesystem supports
 * extended attributes natively (no AppleDouble "._" file fallback).
 */
#define NATIVE_XATTR(VP)	\
	((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0)
125 static void xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t context
,
126 int thread_safe
, int force
);
127 static void xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
128 vfs_context_t context
, int thread_safe
);
132 vnode_setneedinactive(vnode_t vp
)
137 vp
->v_lflag
|= VL_NEEDINACTIVE
;
143 lock_fsnode(vnode_t vp
, int *funnel_state
)
146 *funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
148 if (vp
->v_unsafefs
) {
149 if (vp
->v_unsafefs
->fsnodeowner
== current_thread()) {
150 vp
->v_unsafefs
->fsnode_count
++;
152 lck_mtx_lock(&vp
->v_unsafefs
->fsnodelock
);
154 if (vp
->v_lflag
& (VL_TERMWANT
| VL_TERMINATE
| VL_DEAD
)) {
155 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
158 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
161 vp
->v_unsafefs
->fsnodeowner
= current_thread();
162 vp
->v_unsafefs
->fsnode_count
= 1;
170 unlock_fsnode(vnode_t vp
, int *funnel_state
)
172 if (vp
->v_unsafefs
) {
173 if (--vp
->v_unsafefs
->fsnode_count
== 0) {
174 vp
->v_unsafefs
->fsnodeowner
= NULL
;
175 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
179 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
184 /* ====================================================================== */
185 /* ************ EXTERNAL KERNEL APIS ********************************** */
186 /* ====================================================================== */
189 * prototypes for exported VFS operations
192 VFS_MOUNT(struct mount
* mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t context
)
196 int funnel_state
= 0;
198 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_mount
== 0))
201 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
205 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
208 if (vfs_context_is64bit(context
)) {
209 if (vfs_64bitready(mp
)) {
210 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, context
);
217 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, context
);
221 (void) thread_funnel_set(kernel_flock
, funnel_state
);
227 VFS_START(struct mount
* mp
, int flags
, vfs_context_t context
)
231 int funnel_state
= 0;
233 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_start
== 0))
236 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
239 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
241 error
= (*mp
->mnt_op
->vfs_start
)(mp
, flags
, context
);
243 (void) thread_funnel_set(kernel_flock
, funnel_state
);
249 VFS_UNMOUNT(struct mount
*mp
, int flags
, vfs_context_t context
)
253 int funnel_state
= 0;
255 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_unmount
== 0))
258 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
261 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
263 error
= (*mp
->mnt_op
->vfs_unmount
)(mp
, flags
, context
);
265 (void) thread_funnel_set(kernel_flock
, funnel_state
);
271 VFS_ROOT(struct mount
* mp
, struct vnode
** vpp
, vfs_context_t context
)
275 int funnel_state
= 0;
276 struct vfs_context acontext
;
278 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_root
== 0))
281 if (context
== NULL
) {
282 acontext
.vc_proc
= current_proc();
283 acontext
.vc_ucred
= kauth_cred_get();
286 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
289 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
291 error
= (*mp
->mnt_op
->vfs_root
)(mp
, vpp
, context
);
293 (void) thread_funnel_set(kernel_flock
, funnel_state
);
299 VFS_QUOTACTL(struct mount
*mp
, int cmd
, uid_t uid
, caddr_t datap
, vfs_context_t context
)
303 int funnel_state
= 0;
305 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_quotactl
== 0))
308 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
311 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
313 error
= (*mp
->mnt_op
->vfs_quotactl
)(mp
, cmd
, uid
, datap
, context
);
315 (void) thread_funnel_set(kernel_flock
, funnel_state
);
321 VFS_GETATTR(struct mount
*mp
, struct vfs_attr
*vfa
, vfs_context_t context
)
325 int funnel_state
= 0;
326 struct vfs_context acontext
;
328 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_getattr
== 0))
331 if (context
== NULL
) {
332 acontext
.vc_proc
= current_proc();
333 acontext
.vc_ucred
= kauth_cred_get();
336 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
339 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
341 error
= (*mp
->mnt_op
->vfs_getattr
)(mp
, vfa
, context
);
343 (void) thread_funnel_set(kernel_flock
, funnel_state
);
349 VFS_SETATTR(struct mount
*mp
, struct vfs_attr
*vfa
, vfs_context_t context
)
353 int funnel_state
= 0;
354 struct vfs_context acontext
;
356 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_setattr
== 0))
359 if (context
== NULL
) {
360 acontext
.vc_proc
= current_proc();
361 acontext
.vc_ucred
= kauth_cred_get();
364 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
367 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
369 error
= (*mp
->mnt_op
->vfs_setattr
)(mp
, vfa
, context
);
371 (void) thread_funnel_set(kernel_flock
, funnel_state
);
377 VFS_SYNC(struct mount
*mp
, int flags
, vfs_context_t context
)
381 int funnel_state
= 0;
382 struct vfs_context acontext
;
384 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_sync
== 0))
387 if (context
== NULL
) {
388 acontext
.vc_proc
= current_proc();
389 acontext
.vc_ucred
= kauth_cred_get();
392 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
395 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
397 error
= (*mp
->mnt_op
->vfs_sync
)(mp
, flags
, context
);
399 (void) thread_funnel_set(kernel_flock
, funnel_state
);
405 VFS_VGET(struct mount
* mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t context
)
409 int funnel_state
= 0;
410 struct vfs_context acontext
;
412 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_vget
== 0))
415 if (context
== NULL
) {
416 acontext
.vc_proc
= current_proc();
417 acontext
.vc_ucred
= kauth_cred_get();
420 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
423 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
425 error
= (*mp
->mnt_op
->vfs_vget
)(mp
, ino
, vpp
, context
);
427 (void) thread_funnel_set(kernel_flock
, funnel_state
);
433 VFS_FHTOVP(struct mount
* mp
, int fhlen
, unsigned char * fhp
, vnode_t
* vpp
, vfs_context_t context
)
437 int funnel_state
= 0;
438 struct vfs_context acontext
;
440 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_fhtovp
== 0))
443 if (context
== NULL
) {
444 acontext
.vc_proc
= current_proc();
445 acontext
.vc_ucred
= kauth_cred_get();
448 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
451 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
453 error
= (*mp
->mnt_op
->vfs_fhtovp
)(mp
, fhlen
, fhp
, vpp
, context
);
455 (void) thread_funnel_set(kernel_flock
, funnel_state
);
461 VFS_VPTOFH(struct vnode
* vp
, int *fhlenp
, unsigned char * fhp
, vfs_context_t context
)
465 int funnel_state
= 0;
466 struct vfs_context acontext
;
468 if ((vp
->v_mount
== dead_mountp
) || (vp
->v_mount
->mnt_op
->vfs_vptofh
== 0))
471 if (context
== NULL
) {
472 acontext
.vc_proc
= current_proc();
473 acontext
.vc_ucred
= kauth_cred_get();
476 thread_safe
= THREAD_SAFE_FS(vp
);
479 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
481 error
= (*vp
->v_mount
->mnt_op
->vfs_vptofh
)(vp
, fhlenp
, fhp
, context
);
483 (void) thread_funnel_set(kernel_flock
, funnel_state
);
489 /* returns a copy of vfs type name for the mount_t */
491 vfs_name(mount_t mp
, char * buffer
)
493 strncpy(buffer
, mp
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
496 /* returns vfs type number for the mount_t */
498 vfs_typenum(mount_t mp
)
500 return(mp
->mnt_vtable
->vfc_typenum
);
504 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
506 vfs_flags(mount_t mp
)
508 return((uint64_t)(mp
->mnt_flag
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
)));
511 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
513 vfs_setflags(mount_t mp
, uint64_t flags
)
515 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
517 mp
->mnt_flag
|= lflags
;
520 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
522 vfs_clearflags(mount_t mp
, uint64_t flags
)
524 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
526 mp
->mnt_flag
&= ~lflags
;
529 /* Is the mount_t ronly and upgrade read/write requested? */
531 vfs_iswriteupgrade(mount_t mp
) /* ronly && MNTK_WANTRDWR */
533 return ((mp
->mnt_flag
& MNT_RDONLY
) && (mp
->mnt_kern_flag
& MNTK_WANTRDWR
));
537 /* Is the mount_t mounted ronly */
539 vfs_isrdonly(mount_t mp
)
541 return (mp
->mnt_flag
& MNT_RDONLY
);
544 /* Is the mount_t mounted for filesystem synchronous writes? */
546 vfs_issynchronous(mount_t mp
)
548 return (mp
->mnt_flag
& MNT_SYNCHRONOUS
);
551 /* Is the mount_t mounted read/write? */
553 vfs_isrdwr(mount_t mp
)
555 return ((mp
->mnt_flag
& MNT_RDONLY
) == 0);
559 /* Is mount_t marked for update (ie MNT_UPDATE) */
561 vfs_isupdate(mount_t mp
)
563 return (mp
->mnt_flag
& MNT_UPDATE
);
567 /* Is mount_t marked for reload (ie MNT_RELOAD) */
569 vfs_isreload(mount_t mp
)
571 return ((mp
->mnt_flag
& MNT_UPDATE
) && (mp
->mnt_flag
& MNT_RELOAD
));
574 /* Is mount_t marked for reload (ie MNT_FORCE) */
576 vfs_isforce(mount_t mp
)
578 if ((mp
->mnt_flag
& MNT_FORCE
) || (mp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
))
585 vfs_64bitready(mount_t mp
)
587 if ((mp
->mnt_vtable
->vfc_64bitready
))
594 vfs_authopaque(mount_t mp
)
596 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
))
603 vfs_authopaqueaccess(mount_t mp
)
605 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE_ACCESS
))
612 vfs_setauthopaque(mount_t mp
)
615 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE
;
620 vfs_setauthopaqueaccess(mount_t mp
)
623 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE_ACCESS
;
628 vfs_clearauthopaque(mount_t mp
)
631 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE
;
636 vfs_clearauthopaqueaccess(mount_t mp
)
639 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE_ACCESS
;
644 vfs_setextendedsecurity(mount_t mp
)
647 mp
->mnt_kern_flag
|= MNTK_EXTENDED_SECURITY
;
652 vfs_clearextendedsecurity(mount_t mp
)
655 mp
->mnt_kern_flag
&= ~MNTK_EXTENDED_SECURITY
;
660 vfs_extendedsecurity(mount_t mp
)
662 return(mp
->mnt_kern_flag
& MNTK_EXTENDED_SECURITY
);
665 /* returns the max size of short symlink in this mount_t */
667 vfs_maxsymlen(mount_t mp
)
669 return(mp
->mnt_maxsymlinklen
);
672 /* set max size of short symlink on mount_t */
674 vfs_setmaxsymlen(mount_t mp
, uint32_t symlen
)
676 mp
->mnt_maxsymlinklen
= symlen
;
679 /* return a pointer to the RO vfs_statfs associated with mount_t */
681 vfs_statfs(mount_t mp
)
683 return(&mp
->mnt_vfsstat
);
687 vfs_getattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
692 if ((error
= VFS_GETATTR(mp
, vfa
, ctx
)) != 0)
696 * If we have a filesystem create time, use it to default some others.
698 if (VFSATTR_IS_SUPPORTED(vfa
, f_create_time
)) {
699 if (VFSATTR_IS_ACTIVE(vfa
, f_modify_time
) && !VFSATTR_IS_SUPPORTED(vfa
, f_modify_time
))
700 VFSATTR_RETURN(vfa
, f_modify_time
, vfa
->f_create_time
);
707 vfs_setattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
711 if (vfs_isrdonly(mp
))
714 error
= VFS_SETATTR(mp
, vfa
, ctx
);
717 * If we had alternate ways of setting vfs attributes, we'd
724 /* return the private data handle stored in mount_t */
726 vfs_fsprivate(mount_t mp
)
728 return(mp
->mnt_data
);
731 /* set the private data handle in mount_t */
733 vfs_setfsprivate(mount_t mp
, void *mntdata
)
735 mp
->mnt_data
= mntdata
;
740 * return the block size of the underlying
741 * device associated with mount_t
744 vfs_devblocksize(mount_t mp
) {
746 return(mp
->mnt_devblocksize
);
751 * return the io attributes associated with mount_t
754 vfs_ioattr(mount_t mp
, struct vfsioattr
*ioattrp
)
757 ioattrp
->io_maxreadcnt
= MAXPHYS
;
758 ioattrp
->io_maxwritecnt
= MAXPHYS
;
759 ioattrp
->io_segreadcnt
= 32;
760 ioattrp
->io_segwritecnt
= 32;
761 ioattrp
->io_maxsegreadsize
= MAXPHYS
;
762 ioattrp
->io_maxsegwritesize
= MAXPHYS
;
763 ioattrp
->io_devblocksize
= DEV_BSIZE
;
765 ioattrp
->io_maxreadcnt
= mp
->mnt_maxreadcnt
;
766 ioattrp
->io_maxwritecnt
= mp
->mnt_maxwritecnt
;
767 ioattrp
->io_segreadcnt
= mp
->mnt_segreadcnt
;
768 ioattrp
->io_segwritecnt
= mp
->mnt_segwritecnt
;
769 ioattrp
->io_maxsegreadsize
= mp
->mnt_maxsegreadsize
;
770 ioattrp
->io_maxsegwritesize
= mp
->mnt_maxsegwritesize
;
771 ioattrp
->io_devblocksize
= mp
->mnt_devblocksize
;
773 ioattrp
->io_reserved
[0] = 0;
774 ioattrp
->io_reserved
[1] = 0;
775 ioattrp
->io_reserved
[2] = 0;
780 * set the IO attributes associated with mount_t
783 vfs_setioattr(mount_t mp
, struct vfsioattr
* ioattrp
)
787 mp
->mnt_maxreadcnt
= ioattrp
->io_maxreadcnt
;
788 mp
->mnt_maxwritecnt
= ioattrp
->io_maxwritecnt
;
789 mp
->mnt_segreadcnt
= ioattrp
->io_segreadcnt
;
790 mp
->mnt_segwritecnt
= ioattrp
->io_segwritecnt
;
791 mp
->mnt_maxsegreadsize
= ioattrp
->io_maxsegreadsize
;
792 mp
->mnt_maxsegwritesize
= ioattrp
->io_maxsegwritesize
;
793 mp
->mnt_devblocksize
= ioattrp
->io_devblocksize
;
797 * Add a new filesystem into the kernel specified in passed in
798 * vfstable structure. It fills in the vnode
799 * dispatch vector that is to be passed to when vnodes are created.
800 * It returns a handle which is to be used to when the FS is to be removed
802 typedef int (*PFI
)(void *);
803 extern int vfs_opv_numops
;
805 vfs_fsadd(struct vfs_fsentry
*vfe
, vfstable_t
* handle
)
808 struct vfstable
*newvfstbl
= NULL
;
810 int (***opv_desc_vector_p
)(void *);
811 int (**opv_desc_vector
)(void *);
812 struct vnodeopv_entry_desc
*opve_descp
;
818 * This routine is responsible for all the initialization that would
819 * ordinarily be done as part of the system startup;
822 if (vfe
== (struct vfs_fsentry
*)0)
825 desccount
= vfe
->vfe_vopcnt
;
826 if ((desccount
<=0) || ((desccount
> 5)) || (vfe
->vfe_vfsops
== (struct vfsops
*)NULL
)
827 || (vfe
->vfe_opvdescs
== (struct vnodeopv_desc
**)NULL
))
831 MALLOC(newvfstbl
, void *, sizeof(struct vfstable
), M_TEMP
,
833 bzero(newvfstbl
, sizeof(struct vfstable
));
834 newvfstbl
->vfc_vfsops
= vfe
->vfe_vfsops
;
835 strncpy(&newvfstbl
->vfc_name
[0], vfe
->vfe_fsname
, MFSNAMELEN
);
836 if ((vfe
->vfe_flags
& VFS_TBLNOTYPENUM
))
837 newvfstbl
->vfc_typenum
= maxvfsconf
++;
839 newvfstbl
->vfc_typenum
= vfe
->vfe_fstypenum
;
841 newvfstbl
->vfc_refcount
= 0;
842 newvfstbl
->vfc_flags
= 0;
843 newvfstbl
->vfc_mountroot
= NULL
;
844 newvfstbl
->vfc_next
= NULL
;
845 newvfstbl
->vfc_threadsafe
= 0;
846 newvfstbl
->vfc_vfsflags
= 0;
847 if (vfe
->vfe_flags
& VFS_TBL64BITREADY
)
848 newvfstbl
->vfc_64bitready
= 1;
849 if (vfe
->vfe_flags
& VFS_TBLTHREADSAFE
)
850 newvfstbl
->vfc_threadsafe
= 1;
851 if (vfe
->vfe_flags
& VFS_TBLFSNODELOCK
)
852 newvfstbl
->vfc_threadsafe
= 1;
853 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) == VFS_TBLLOCALVOL
)
854 newvfstbl
->vfc_flags
|= MNT_LOCAL
;
855 if (vfe
->vfe_flags
& VFS_TBLLOCALVOL
)
856 newvfstbl
->vfc_vfsflags
|= VFC_VFSLOCALARGS
;
858 newvfstbl
->vfc_vfsflags
|= VFC_VFSGENERICARGS
;
862 * Allocate and init the vectors.
863 * Also handle backwards compatibility.
865 * We allocate one large block to hold all <desccount>
866 * vnode operation vectors stored contiguously.
868 /* XXX - shouldn't be M_TEMP */
870 descsize
= desccount
* vfs_opv_numops
* sizeof(PFI
);
871 MALLOC(descptr
, PFI
*, descsize
,
873 bzero(descptr
, descsize
);
875 newvfstbl
->vfc_descptr
= descptr
;
876 newvfstbl
->vfc_descsize
= descsize
;
879 for (i
= 0; i
< desccount
; i
++ ) {
880 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
882 * Fill in the caller's pointer to the start of the i'th vector.
883 * They'll need to supply it when calling vnode_create.
885 opv_desc_vector
= descptr
+ i
* vfs_opv_numops
;
886 *opv_desc_vector_p
= opv_desc_vector
;
888 for (j
= 0; vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
889 opve_descp
= &(vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
]);
892 * Sanity check: is this operation listed
893 * in the list of operations? We check this
894 * by seeing if its offest is zero. Since
895 * the default routine should always be listed
896 * first, it should be the only one with a zero
897 * offset. Any other operation with a zero
898 * offset is probably not listed in
899 * vfs_op_descs, and so is probably an error.
901 * A panic here means the layer programmer
902 * has committed the all-too common bug
903 * of adding a new operation to the layer's
904 * list of vnode operations but
905 * not adding the operation to the system-wide
906 * list of supported operations.
908 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
909 opve_descp
->opve_op
->vdesc_offset
!= VOFFSET(vnop_default
)) {
910 printf("vfs_fsadd: operation %s not listed in %s.\n",
911 opve_descp
->opve_op
->vdesc_name
,
913 panic("vfs_fsadd: bad operation");
916 * Fill in this entry.
918 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
919 opve_descp
->opve_impl
;
924 * Finally, go back and replace unfilled routines
925 * with their default. (Sigh, an O(n^3) algorithm. I
926 * could make it better, but that'd be work, and n is small.)
928 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
931 * Force every operations vector to have a default routine.
933 opv_desc_vector
= *opv_desc_vector_p
;
934 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
)
935 panic("vfs_fsadd: operation vector without default routine.");
936 for (j
= 0; j
< vfs_opv_numops
; j
++)
937 if (opv_desc_vector
[j
] == NULL
)
939 opv_desc_vector
[VOFFSET(vnop_default
)];
941 } /* end of each vnodeopv_desc parsing */
945 *handle
= vfstable_add(newvfstbl
);
947 if (newvfstbl
->vfc_typenum
<= maxvfsconf
)
948 maxvfsconf
= newvfstbl
->vfc_typenum
+ 1;
951 if (newvfstbl
->vfc_vfsops
->vfs_init
)
952 (*newvfstbl
->vfc_vfsops
->vfs_init
)((struct vfsconf
*)handle
);
954 FREE(newvfstbl
, M_TEMP
);
960 * Removes the filesystem from kernel.
961 * The argument passed in is the handle that was given when
962 * file system was added
965 vfs_fsremove(vfstable_t handle
)
967 struct vfstable
* vfstbl
= (struct vfstable
*)handle
;
968 void *old_desc
= NULL
;
971 /* Preflight check for any mounts */
973 if ( vfstbl
->vfc_refcount
!= 0 ) {
980 * save the old descriptor; the free cannot occur unconditionally,
981 * since vfstable_del() may fail.
983 if (vfstbl
->vfc_descptr
&& vfstbl
->vfc_descsize
) {
984 old_desc
= vfstbl
->vfc_descptr
;
986 err
= vfstable_del(vfstbl
);
988 /* free the descriptor if the delete was successful */
989 if (err
== 0 && old_desc
) {
990 FREE(old_desc
, M_TEMP
);
997 * This returns a reference to mount_t
998 * which should be dropped using vfs_mountrele().
999 * Not doing so will leak a mountpoint
1000 * and associated data structures.
1003 vfs_mountref(__unused mount_t mp
) /* gives a reference */
1008 /* This drops the reference on mount_t that was acquired */
1010 vfs_mountrele(__unused mount_t mp
) /* drops reference */
1016 vfs_context_pid(vfs_context_t context
)
1018 return (context
->vc_proc
->p_pid
);
1022 vfs_context_suser(vfs_context_t context
)
1024 return (suser(context
->vc_ucred
, 0));
1027 vfs_context_issignal(vfs_context_t context
, sigset_t mask
)
1029 if (context
->vc_proc
)
1030 return(proc_pendingsignals(context
->vc_proc
, mask
));
1035 vfs_context_is64bit(vfs_context_t context
)
1037 if (context
->vc_proc
)
1038 return(proc_is64bit(context
->vc_proc
));
1043 vfs_context_proc(vfs_context_t context
)
1045 return (context
->vc_proc
);
1049 vfs_context_create(vfs_context_t context
)
1051 struct vfs_context
* newcontext
;
1053 newcontext
= (struct vfs_context
*)kalloc(sizeof(struct vfs_context
));
1056 kauth_cred_t safecred
;
1058 newcontext
->vc_proc
= context
->vc_proc
;
1059 safecred
= context
->vc_ucred
;
1061 newcontext
->vc_proc
= proc_self();
1062 safecred
= kauth_cred_get();
1064 if (IS_VALID_CRED(safecred
))
1065 kauth_cred_ref(safecred
);
1066 newcontext
->vc_ucred
= safecred
;
1069 return((vfs_context_t
)0);
1073 vfs_context_rele(vfs_context_t context
)
1076 if (IS_VALID_CRED(context
->vc_ucred
))
1077 kauth_cred_unref(&context
->vc_ucred
);
1078 kfree(context
, sizeof(struct vfs_context
));
1085 vfs_context_ucred(vfs_context_t context
)
1087 return (context
->vc_ucred
);
1091 * Return true if the context is owned by the superuser.
1094 vfs_context_issuser(vfs_context_t context
)
1096 return(context
->vc_ucred
->cr_uid
== 0);
1100 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1104 * Convert between vnode types and inode formats (since POSIX.1
1105 * defines mode word of stat structure in terms of inode formats).
1108 vnode_iftovt(int mode
)
1110 return(iftovt_tab
[((mode
) & S_IFMT
) >> 12]);
1114 vnode_vttoif(enum vtype indx
)
1116 return(vttoif_tab
[(int)(indx
)]);
/* Compose an inode mode word from a vnode type and permission bits. */
int
vnode_makeimode(int indx, int mode)
{
	return (int)(VTTOIF(indx) | (mode));
}
1127 * vnode manipulation functions.
1130 /* returns system root vnode reference; It should be dropped using vrele() */
1136 error
= vnode_get(rootvnode
);
1138 return ((vnode_t
)0);
1145 vnode_vid(vnode_t vp
)
1147 return ((uint32_t)(vp
->v_id
));
1150 /* returns a mount reference; drop it with vfs_mountrelease() */
1152 vnode_mount(vnode_t vp
)
1154 return (vp
->v_mount
);
1157 /* returns a mount reference iff vnode_t is a dir and is a mount point */
1159 vnode_mountedhere(vnode_t vp
)
1163 if ((vp
->v_type
== VDIR
) && ((mp
= vp
->v_mountedhere
) != NULL
) &&
1164 (mp
->mnt_vnodecovered
== vp
))
1167 return (mount_t
)NULL
;
1170 /* returns vnode type of vnode_t */
1172 vnode_vtype(vnode_t vp
)
1174 return (vp
->v_type
);
1177 /* returns FS specific node saved in vnode */
1179 vnode_fsnode(vnode_t vp
)
1181 return (vp
->v_data
);
1185 vnode_clearfsnode(vnode_t vp
)
1191 vnode_specrdev(vnode_t vp
)
1197 /* Accessor functions */
1198 /* is vnode_t a root vnode */
1200 vnode_isvroot(vnode_t vp
)
1202 return ((vp
->v_flag
& VROOT
)? 1 : 0);
1205 /* is vnode_t a system vnode */
1207 vnode_issystem(vnode_t vp
)
1209 return ((vp
->v_flag
& VSYSTEM
)? 1 : 0);
1212 /* if vnode_t mount operation in progress */
1214 vnode_ismount(vnode_t vp
)
1216 return ((vp
->v_flag
& VMOUNT
)? 1 : 0);
1219 /* is this vnode under recyle now */
1221 vnode_isrecycled(vnode_t vp
)
1226 ret
= (vp
->v_lflag
& (VL_TERMINATE
|VL_DEAD
))? 1 : 0;
1231 /* is vnode_t marked to not keep data cached once it's been consumed */
1233 vnode_isnocache(vnode_t vp
)
1235 return ((vp
->v_flag
& VNOCACHE_DATA
)? 1 : 0);
1239 * has sequential readahead been disabled on this vnode
1242 vnode_isnoreadahead(vnode_t vp
)
1244 return ((vp
->v_flag
& VRAOFF
)? 1 : 0);
1247 /* is vnode_t a standard one? */
1249 vnode_isstandard(vnode_t vp
)
1251 return ((vp
->v_flag
& VSTANDARD
)? 1 : 0);
1254 /* don't vflush() if SKIPSYSTEM */
1256 vnode_isnoflush(vnode_t vp
)
1258 return ((vp
->v_flag
& VNOFLUSH
)? 1 : 0);
1261 /* is vnode_t a regular file */
1263 vnode_isreg(vnode_t vp
)
1265 return ((vp
->v_type
== VREG
)? 1 : 0);
1268 /* is vnode_t a directory? */
1270 vnode_isdir(vnode_t vp
)
1272 return ((vp
->v_type
== VDIR
)? 1 : 0);
1275 /* is vnode_t a symbolic link ? */
1277 vnode_islnk(vnode_t vp
)
1279 return ((vp
->v_type
== VLNK
)? 1 : 0);
1282 /* is vnode_t a fifo ? */
1284 vnode_isfifo(vnode_t vp
)
1286 return ((vp
->v_type
== VFIFO
)? 1 : 0);
1289 /* is vnode_t a block device? */
1291 vnode_isblk(vnode_t vp
)
1293 return ((vp
->v_type
== VBLK
)? 1 : 0);
1296 /* is vnode_t a char device? */
1298 vnode_ischr(vnode_t vp
)
1300 return ((vp
->v_type
== VCHR
)? 1 : 0);
1303 /* is vnode_t a socket? */
1305 vnode_issock(vnode_t vp
)
1307 return ((vp
->v_type
== VSOCK
)? 1 : 0);
1311 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1313 vnode_setnocache(vnode_t vp
)
1316 vp
->v_flag
|= VNOCACHE_DATA
;
1321 vnode_clearnocache(vnode_t vp
)
1324 vp
->v_flag
&= ~VNOCACHE_DATA
;
1329 vnode_setnoreadahead(vnode_t vp
)
1332 vp
->v_flag
|= VRAOFF
;
1337 vnode_clearnoreadahead(vnode_t vp
)
1340 vp
->v_flag
&= ~VRAOFF
;
1345 /* mark vnode_t to skip vflush() is SKIPSYSTEM */
1347 vnode_setnoflush(vnode_t vp
)
1350 vp
->v_flag
|= VNOFLUSH
;
1355 vnode_clearnoflush(vnode_t vp
)
1358 vp
->v_flag
&= ~VNOFLUSH
;
1363 /* is vnode_t a blkdevice and has a FS mounted on it */
1365 vnode_ismountedon(vnode_t vp
)
1367 return ((vp
->v_specflags
& SI_MOUNTEDON
)? 1 : 0);
1371 vnode_setmountedon(vnode_t vp
)
1374 vp
->v_specflags
|= SI_MOUNTEDON
;
1379 vnode_clearmountedon(vnode_t vp
)
1382 vp
->v_specflags
&= ~SI_MOUNTEDON
;
1388 vnode_settag(vnode_t vp
, int tag
)
1395 vnode_tag(vnode_t vp
)
1401 vnode_parent(vnode_t vp
)
1404 return(vp
->v_parent
);
1408 vnode_setparent(vnode_t vp
, vnode_t dvp
)
1414 vnode_name(vnode_t vp
)
1416 /* we try to keep v_name a reasonable name for the node */
1421 vnode_setname(vnode_t vp
, char * name
)
1426 /* return the registered FS name when adding the FS to kernel */
1428 vnode_vfsname(vnode_t vp
, char * buf
)
1430 strncpy(buf
, vp
->v_mount
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
1433 /* return the FS type number */
1435 vnode_vfstypenum(vnode_t vp
)
1437 return(vp
->v_mount
->mnt_vtable
->vfc_typenum
);
1441 vnode_vfs64bitready(vnode_t vp
)
1444 if ((vp
->v_mount
->mnt_vtable
->vfc_64bitready
))
1452 /* return the visible flags on associated mount point of vnode_t */
1454 vnode_vfsvisflags(vnode_t vp
)
1456 return(vp
->v_mount
->mnt_flag
& MNT_VISFLAGMASK
);
1459 /* return the command modifier flags on associated mount point of vnode_t */
1461 vnode_vfscmdflags(vnode_t vp
)
1463 return(vp
->v_mount
->mnt_flag
& MNT_CMDFLAGS
);
1466 /* return the max symlink of short links of vnode_t */
1468 vnode_vfsmaxsymlen(vnode_t vp
)
1470 return(vp
->v_mount
->mnt_maxsymlinklen
);
1473 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1475 vnode_vfsstatfs(vnode_t vp
)
1477 return(&vp
->v_mount
->mnt_vfsstat
);
1480 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1482 vnode_vfsfsprivate(vnode_t vp
)
1484 return(vp
->v_mount
->mnt_data
);
1487 /* is vnode_t in a rdonly mounted FS */
1489 vnode_vfsisrdonly(vnode_t vp
)
1491 return ((vp
->v_mount
->mnt_flag
& MNT_RDONLY
)? 1 : 0);
1495 /* returns vnode ref to current working directory */
1497 current_workingdir(void)
1499 struct proc
*p
= current_proc();
1502 if ( (vp
= p
->p_fd
->fd_cdir
) ) {
1503 if ( (vnode_getwithref(vp
)) )
1509 /* returns vnode ref to current root(chroot) directory */
1511 current_rootdir(void)
1513 struct proc
*p
= current_proc();
1516 if ( (vp
= p
->p_fd
->fd_rdir
) ) {
1517 if ( (vnode_getwithref(vp
)) )
1524 * Get a filesec and optional acl contents from an extended attribute.
1525 * Function will attempt to retrive ACL, UUID, and GUID information using a
1526 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1528 * Parameters: vp The vnode on which to operate.
1529 * fsecp The filesec (and ACL, if any) being
1531 * ctx The vnode context in which the
1532 * operation is to be attempted.
1534 * Returns: 0 Success
1537 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1538 * host byte order, as will be the ACL contents, if any.
1539 * Internally, we will cannonize these values from network (PPC)
1540 * byte order after we retrieve them so that the on-disk contents
1541 * of the extended attribute are identical for both PPC and Intel
1542 * (if we were not being required to provide this service via
1543 * fallback, this would be the job of the filesystem
1544 * 'VNOP_GETATTR' call).
1546 * We use ntohl() because it has a transitive property on Intel
1547 * machines and no effect on PPC mancines. This guarantees us
1549 * XXX: Deleting rather than ignoring a corrupt security structure is
1550 * probably the only way to reset it without assistance from a
1551 * file system integrity checking tool. Right now we ignore it.
1553 * XXX: We should enumerate the possible errno values here, and where
1554 * in the code they originated.
/*
 * NOTE(review): fragmentary extraction — error-path gotos, the actual
 * read into the uio, the byte-swap of rsize-derived fields, and the
 * function's out: cleanup label are not fully visible. Visible flow:
 *   1. vn_getxattr() with a NULL buffer to size the KAUTH_FILESEC_XATTR EA;
 *      ENOATTR/ENOENT/EJUSTRETURN mean "no filesec" (not an error).
 *   2. Sanity-check the size (KAUTH_FILESEC_VALID), derive the entry
 *      count (KAUTH_FILESEC_COUNT), allocate a filesec and a 1-iov
 *      UIO_READ uio, then vn_getxattr() the attribute contents.
 *   3. Validate in host byte order: minimum size, magic number
 *      (compared against ntohl(KAUTH_FILESEC_MAGIC)), and — unless the
 *      stored entrycount is KAUTH_FILESEC_NOACL — that the entrycount is
 *      within KAUTH_ACL_MAX_ENTRIES and fits within the bytes read.
 *   4. kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, ...) converts the
 *      on-disk (network/PPC order) data to host order.
 *   5. Cleanup frees the filesec on failure and the uio if allocated.
 */
1557 vnode_get_filesec(vnode_t vp
, kauth_filesec_t
*fsecp
, vfs_context_t ctx
)
1559 kauth_filesec_t fsec
;
1562 size_t xsize
, rsize
;
1565 uint32_t host_fsec_magic
;
1566 uint32_t host_acl_entrycount
;
1572 /* find out how big the EA is */
1573 if (vn_getxattr(vp
, KAUTH_FILESEC_XATTR
, NULL
, &xsize
, XATTR_NOSECURITY
, ctx
) != 0) {
1574 /* no EA, no filesec */
1575 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1577 /* either way, we are done */
1582 * To be valid, a kauth_filesec_t must be large enough to hold a zero
1583 * ACE entry ACL, and if it's larger than that, it must have the right
1584 * number of bytes such that it contains an atomic number of ACEs,
1585 * rather than partial entries. Otherwise, we ignore it.
1587 if (!KAUTH_FILESEC_VALID(xsize
)) {
1588 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize
);
1593 /* how many entries would fit? */
1594 fsec_size
= KAUTH_FILESEC_COUNT(xsize
);
1596 /* get buffer and uio */
1597 if (((fsec
= kauth_filesec_alloc(fsec_size
)) == NULL
) ||
1598 ((fsec_uio
= uio_create(1, 0, UIO_SYSSPACE
, UIO_READ
)) == NULL
) ||
1599 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), xsize
)) {
1600 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1605 /* read security attribute */
1607 if ((error
= vn_getxattr(vp
,
1608 KAUTH_FILESEC_XATTR
,
1614 /* no attribute - no security data */
1615 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1617 /* either way, we are done */
1622 * Validate security structure; the validation must take place in host
1623 * byte order. If it's corrupt, we will just ignore it.
1626 /* Validate the size before trying to convert it */
1627 if (rsize
< KAUTH_FILESEC_SIZE(0)) {
1628 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize
);
1632 /* Validate the magic number before trying to convert it */
1633 host_fsec_magic
= ntohl(KAUTH_FILESEC_MAGIC
);
1634 if (fsec
->fsec_magic
!= host_fsec_magic
) {
1635 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic
);
1639 /* Validate the entry count before trying to convert it. */
1640 host_acl_entrycount
= ntohl(fsec
->fsec_acl
.acl_entrycount
);
1641 if (host_acl_entrycount
!= KAUTH_FILESEC_NOACL
) {
1642 if (host_acl_entrycount
> KAUTH_ACL_MAX_ENTRIES
) {
1643 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount
);
1646 if (KAUTH_FILESEC_SIZE(host_acl_entrycount
) > rsize
) {
1647 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount
, rsize
);
1652 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, NULL
);
1659 kauth_filesec_free(fsec
);
1660 if (fsec_uio
!= NULL
)
1668 * Set a filesec and optional acl contents into an extended attribute.
1669 * function will attempt to store ACL, UUID, and GUID information using a
1670 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
1671 * may or may not point to the `fsec->fsec_acl`, depending on whether the
1672 * original caller supplied an acl.
1674 * Parameters: vp The vnode on which to operate.
1675 * fsec The filesec being set.
1676 * acl The acl to be associated with 'fsec'.
1677 * ctx The vnode context in which the
1678 * operation is to be attempted.
1680 * Returns: 0 Success
1683 * Notes: Both the fsec and the acl are always valid.
1685 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
1686 * as are the acl contents, if they are used. Internally, we will
1687 * canonicalize these values into network (PPC) byte order before we
1688 * attempt to write them so that the on-disk contents of the
1689 * extended attribute are identical for both PPC and Intel (if we
1690 * were not being required to provide this service via fallback,
1691 * this would be the job of the filesystem 'VNOP_SETATTR' call).
1692 * We reverse this process on the way out, so we leave with the
1693 * same byte order we started with.
1695 * XXX: We should enumerate the possible errno values here, and where
1696 * in the code they originated.
/*
 * NOTE(review): fragmentary extraction — error gotos, the out: cleanup
 * label and the final return are not visible. Visible flow: build a
 * 2-iov UIO_WRITE uio — iov[0] is the filesec header (sizeof(struct
 * kauth_filesec) minus the embedded kauth_acl) and iov[1] is the acl
 * (saved_acl_copysize bytes; the size is captured BEFORE the endian swap
 * because KAUTH_ACL_COPYSIZE reads acl_entrycount, which gets swapped) —
 * then convert fsec/acl to disk (network) order via
 * kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, ...), write the
 * KAUTH_FILESEC_XATTR EA with vn_setxattr(..., XATTR_NOSECURITY, ...),
 * and swap back to host order so the caller's data is unchanged.
 */
1699 vnode_set_filesec(vnode_t vp
, kauth_filesec_t fsec
, kauth_acl_t acl
, vfs_context_t ctx
)
1704 uint32_t saved_acl_copysize
;
1708 if ((fsec_uio
= uio_create(2, 0, UIO_SYSSPACE
, UIO_WRITE
)) == NULL
) {
1709 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
1714 * Save the pre-converted ACL copysize, because it gets swapped too
1715 * if we are running with the wrong endianness.
1717 saved_acl_copysize
= KAUTH_ACL_COPYSIZE(acl
);
1719 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK
, fsec
, acl
);
1721 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), sizeof(struct kauth_filesec
) - sizeof(struct kauth_acl
));
1722 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(acl
), saved_acl_copysize
);
1723 error
= vn_setxattr(vp
,
1724 KAUTH_FILESEC_XATTR
,
1726 XATTR_NOSECURITY
, /* we have auth'ed already */
1728 VFS_DEBUG(ctx
, vp
, "SETATTR - set ACL returning %d", error
);
1730 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, acl
);
1733 if (fsec_uio
!= NULL
)
/*
 * Get attributes for a vnode, with a fallback that synthesizes values the
 * filesystem did not supply.
 *
 * NOTE(review): fragmentary extraction — local declarations (nuid, ngid,
 * facl, error), several gotos and the out: label/return are not visible.
 * Visible flow:
 *   - If the mount does not support extended security, clear va_acl /
 *     va_uuuid / va_guuid so the FS is never asked for them.
 *   - If the caller wants va_data_alloc/va_total_size/va_total_alloc,
 *     also request the component sizes so synthesis below has inputs.
 *   - Call VNOP_GETATTR(), then if ACL/UUID/GUID were requested but not
 *     returned, fall back to vnode_get_filesec() (VDIR/VLNK/VREG only);
 *     no filesec means NULL acl and null GUIDs are returned.
 *   - Discard an unsolicited ACL the FS returned but nobody asked for.
 *   - Map uid/gid for MNT_IGNORE_OWNERSHIP mounts (mnt_fsowner/fsgroup,
 *     with the "unknown user" uid/gid 99 replaced by the caller's
 *     credential uid/gid for non-superusers).
 *   - Synthesize defaults: va_iosize from f_iosize, zero for
 *     flags/filerev/gen/data_size; data_alloc/total_size/total_alloc
 *     derived by rounding up to f_bsize (refreshing vfsstat if f_bsize
 *     is 0); va_change_time from va_modify_time; va_type from v_type;
 *     va_fsid from the mount's f_fsid.
 */
1740 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1742 kauth_filesec_t fsec
;
1748 /* don't ask for extended security data if the filesystem doesn't support it */
1749 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
1750 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
1751 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1752 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1756 * If the caller wants size values we might have to synthesise, give the
1757 * filesystem the opportunity to supply better intermediate results.
1759 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
1760 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
1761 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
1762 VATTR_SET_ACTIVE(vap
, va_data_size
);
1763 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
1764 VATTR_SET_ACTIVE(vap
, va_total_size
);
1765 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
1768 error
= VNOP_GETATTR(vp
, vap
, ctx
);
1770 KAUTH_DEBUG("ERROR - returning %d", error
);
1775 * If extended security data was requested but not returned, try the fallback
1778 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
1781 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
1782 /* try to get the filesec */
1783 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
1786 /* if no filesec, no attributes */
1788 VATTR_RETURN(vap
, va_acl
, NULL
);
1789 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
1790 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
1793 /* looks good, try to return what we were asked for */
1794 VATTR_RETURN(vap
, va_uuuid
, fsec
->fsec_owner
);
1795 VATTR_RETURN(vap
, va_guuid
, fsec
->fsec_group
);
1797 /* only return the ACL if we were actually asked for it */
1798 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
1799 if (fsec
->fsec_acl
.acl_entrycount
== KAUTH_FILESEC_NOACL
) {
1800 VATTR_RETURN(vap
, va_acl
, NULL
);
1802 facl
= kauth_acl_alloc(fsec
->fsec_acl
.acl_entrycount
);
1804 kauth_filesec_free(fsec
);
1808 bcopy(&fsec
->fsec_acl
, facl
, KAUTH_ACL_COPYSIZE(&fsec
->fsec_acl
));
1809 VATTR_RETURN(vap
, va_acl
, facl
);
1812 kauth_filesec_free(fsec
);
1816 * If someone gave us an unsolicited filesec, toss it. We promise that
1817 * we're OK with a filesystem giving us anything back, but our callers
1818 * only expect what they asked for.
1820 if (VATTR_IS_SUPPORTED(vap
, va_acl
) && !VATTR_IS_ACTIVE(vap
, va_acl
)) {
1821 if (vap
->va_acl
!= NULL
)
1822 kauth_acl_free(vap
->va_acl
);
1823 VATTR_CLEAR_SUPPORTED(vap
, va_acl
);
1826 #if 0 /* enable when we have a filesystem only supporting UUIDs */
1828 * Handle the case where we need a UID/GID, but only have extended
1829 * security information.
1831 if (VATTR_NOT_RETURNED(vap
, va_uid
) &&
1832 VATTR_IS_SUPPORTED(vap
, va_uuuid
) &&
1833 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
1834 if ((error
= kauth_cred_guid2uid(&vap
->va_uuuid
, &nuid
)) == 0)
1835 VATTR_RETURN(vap
, va_uid
, nuid
);
1837 if (VATTR_NOT_RETURNED(vap
, va_gid
) &&
1838 VATTR_IS_SUPPORTED(vap
, va_guuid
) &&
1839 !kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
1840 if ((error
= kauth_cred_guid2gid(&vap
->va_guuid
, &ngid
)) == 0)
1841 VATTR_RETURN(vap
, va_gid
, ngid
);
1846 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
1848 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
1849 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1850 nuid
= vp
->v_mount
->mnt_fsowner
;
1851 if (nuid
== KAUTH_UID_NONE
)
1853 } else if (VATTR_IS_SUPPORTED(vap
, va_uid
)) {
1856 /* this will always be something sensible */
1857 nuid
= vp
->v_mount
->mnt_fsowner
;
1859 if ((nuid
== 99) && !vfs_context_issuser(ctx
))
1860 nuid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
1861 VATTR_RETURN(vap
, va_uid
, nuid
);
1863 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
1864 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1865 ngid
= vp
->v_mount
->mnt_fsgroup
;
1866 if (ngid
== KAUTH_GID_NONE
)
1868 } else if (VATTR_IS_SUPPORTED(vap
, va_gid
)) {
1871 /* this will always be something sensible */
1872 ngid
= vp
->v_mount
->mnt_fsgroup
;
1874 if ((ngid
== 99) && !vfs_context_issuser(ctx
))
1875 ngid
= kauth_cred_getgid(vfs_context_ucred(ctx
));
1876 VATTR_RETURN(vap
, va_gid
, ngid
);
1880 * Synthesise some values that can be reasonably guessed.
1882 if (!VATTR_IS_SUPPORTED(vap
, va_iosize
))
1883 VATTR_RETURN(vap
, va_iosize
, vp
->v_mount
->mnt_vfsstat
.f_iosize
);
1885 if (!VATTR_IS_SUPPORTED(vap
, va_flags
))
1886 VATTR_RETURN(vap
, va_flags
, 0);
1888 if (!VATTR_IS_SUPPORTED(vap
, va_filerev
))
1889 VATTR_RETURN(vap
, va_filerev
, 0);
1891 if (!VATTR_IS_SUPPORTED(vap
, va_gen
))
1892 VATTR_RETURN(vap
, va_gen
, 0);
1895 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
1897 if (!VATTR_IS_SUPPORTED(vap
, va_data_size
))
1898 VATTR_RETURN(vap
, va_data_size
, 0);
1900 /* do we want any of the possibly-computed values? */
1901 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
1902 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
1903 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
1904 /* make sure f_bsize is valid */
1905 if (vp
->v_mount
->mnt_vfsstat
.f_bsize
== 0) {
1906 if ((error
= vfs_update_vfsstat(vp
->v_mount
, ctx
)) != 0)
1910 /* default va_data_alloc from va_data_size */
1911 if (!VATTR_IS_SUPPORTED(vap
, va_data_alloc
))
1912 VATTR_RETURN(vap
, va_data_alloc
, roundup(vap
->va_data_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
1914 /* default va_total_size from va_data_size */
1915 if (!VATTR_IS_SUPPORTED(vap
, va_total_size
))
1916 VATTR_RETURN(vap
, va_total_size
, vap
->va_data_size
);
1918 /* default va_total_alloc from va_total_size which is guaranteed at this point */
1919 if (!VATTR_IS_SUPPORTED(vap
, va_total_alloc
))
1920 VATTR_RETURN(vap
, va_total_alloc
, roundup(vap
->va_total_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
1924 * If we don't have a change time, pull it from the modtime.
1926 if (!VATTR_IS_SUPPORTED(vap
, va_change_time
) && VATTR_IS_SUPPORTED(vap
, va_modify_time
))
1927 VATTR_RETURN(vap
, va_change_time
, vap
->va_modify_time
);
1930 * This is really only supported for the creation VNOPs, but since the field is there
1931 * we should populate it correctly.
1933 VATTR_RETURN(vap
, va_type
, vp
->v_type
);
1936 * The fsid can be obtained from the mountpoint directly.
1938 VATTR_RETURN(vap
, va_fsid
, vp
->v_mount
->mnt_vfsstat
.f_fsid
.val
[0]);
1946 * Set the attributes on a vnode in a vnode context.
1948 * Parameters: vp The vnode whose attributes to set.
1949 * vap A pointer to the attributes to set.
1950 * ctx The vnode context in which the
1951 * operation is to be attempted.
1953 * Returns: 0 Success
1956 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
1958 * The contents of the data area pointed to by 'vap' may be
1959 * modified if the vnode is on a filesystem which has been
1960 * mounted with ignore ownership flags, or by the underlying
1961 * VFS itself, or by the fallback code, if the underlying VFS
1962 * does not support ACL, UUID, or GUUID attributes directly.
1964 * XXX: We should enumerate the possible errno values here, and where
1965 * in the code they originated.
/*
 * NOTE(review): fragmentary extraction — returns, gotos and the out:
 * label are not visible. Visible flow:
 *   - Reject setattr on read-only mounts (vfs_isrdonly).
 *   - On MNT_IGNORE_OWNERSHIP mounts, silently drop uid/gid changes;
 *     otherwise remember that ownership changed (for the FSE_CHOWN
 *     event below).
 *   - Refuse ACL/UUID/GUID changes when the mount lacks extended
 *     security support (ENOTSUP per the debug message).
 *   - Call VNOP_SETATTR(); if it succeeded but left attributes
 *     unsupported, use vnode_setattr_fallback() (EA-backed filesec).
 *   - If any authorization-relevant attribute was actually changed
 *     (mode/uid/gid/flags/acl/uuuid/guuid), invalidate cached
 *     credentials on the vnode.
 *   - Emit FSE_STAT_CHANGED or FSE_CHOWN fsevents, but only when the
 *     change was more than a bare access-time update.
 */
1968 vnode_setattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
1970 int error
, is_ownership_change
=0;
1973 * Make sure the filesystem is mounted R/W.
1974 * If not, return an error.
1976 if (vfs_isrdonly(vp
->v_mount
)) {
1982 * If ownership is being ignored on this volume, we silently discard
1983 * ownership changes.
1985 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
1986 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1987 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1990 if (VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
)) {
1991 is_ownership_change
= 1;
1995 * Make sure that extended security is enabled if we're going to try
1998 if (!vfs_extendedsecurity(vnode_mount(vp
)) &&
1999 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
2000 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2005 error
= VNOP_SETATTR(vp
, vap
, ctx
);
2007 if ((error
== 0) && !VATTR_ALL_SUPPORTED(vap
))
2008 error
= vnode_setattr_fallback(vp
, vap
, ctx
);
2011 * If we have changed any of the things about the file that are likely
2012 * to result in changes to authorisation results, blow the vnode auth
2015 if (VATTR_IS_SUPPORTED(vap
, va_mode
) ||
2016 VATTR_IS_SUPPORTED(vap
, va_uid
) ||
2017 VATTR_IS_SUPPORTED(vap
, va_gid
) ||
2018 VATTR_IS_SUPPORTED(vap
, va_flags
) ||
2019 VATTR_IS_SUPPORTED(vap
, va_acl
) ||
2020 VATTR_IS_SUPPORTED(vap
, va_uuuid
) ||
2021 VATTR_IS_SUPPORTED(vap
, va_guuid
))
2022 vnode_uncache_credentials(vp
);
2023 // only send a stat_changed event if this is more than
2024 // just an access time update
2025 if (error
== 0 && (vap
->va_active
!= VNODE_ATTR_BIT(va_access_time
))) {
2026 if (need_fsevent(FSE_STAT_CHANGED
, vp
) || (is_ownership_change
&& need_fsevent(FSE_CHOWN
, vp
))) {
2027 if (is_ownership_change
== 0)
2028 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2030 add_fsevent(FSE_CHOWN
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2039 * Fallback for setting the attributes on a vnode in a vnode context. This
2040 * Function will attempt to store ACL, UUID, and GUID information utilizing
2041 * a read/modify/write operation against an EA used as a backing store for
2044 * Parameters: vp The vnode whose attributes to set.
2045 * vap A pointer to the attributes to set.
2046 * ctx The vnode context in which the
2047 * operation is to be attempted.
2049 * Returns: 0 Success
2052 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2053 * as are the fsec and lfsec, if they are used.
2055 * The contents of the data area pointed to by 'vap' may be
2056 * modified to indicate that the attribute is supported for
2057 * any given requested attribute.
2059 * XXX: We should enumerate the possible errno values here, and where
2060 * in the code they originated.
/*
 * NOTE(review): fragmentary extraction — gotos, the out: label, returns
 * and some branch bodies are not visible. Visible flow (read-modify-write
 * of the EA-backed filesec for attributes the FS left unsupported):
 *   - Only acts when va_acl/va_uuuid/va_guuid were not returned by the FS.
 *   - Refuses file types other than VDIR/VLNK/VREG.
 *   - If the caller did not supply ALL three items, fetches the existing
 *     filesec via vnode_get_filesec(); otherwise uses the local 'lfsec'
 *     (detected by fsec == &lfsec and freshly initialized: magic, null
 *     owner/group GUIDs, no ACL, zero flags).
 *   - Merges the caller-supplied va_uuuid / va_guuid / va_acl into the
 *     filesec, marking each as supported.
 *   - If the merged filesec is entirely null (no ACL, null GUIDs), the
 *     whole KAUTH_FILESEC_XATTR EA is removed (ENOATTR treated as OK);
 *     otherwise it is written back via vnode_set_filesec().
 *   - A fetched (heap) filesec is freed at the end; the local one is not.
 */
2063 vnode_setattr_fallback(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2065 kauth_filesec_t fsec
;
2067 struct kauth_filesec lfsec
;
2073 * Extended security fallback via extended attributes.
2075 * Note that we do not free the filesec; the caller is expected to
2078 if (VATTR_NOT_RETURNED(vap
, va_acl
) ||
2079 VATTR_NOT_RETURNED(vap
, va_uuuid
) ||
2080 VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2081 VFS_DEBUG(ctx
, vp
, "SETATTR - doing filesec fallback");
2084 * Fail for file types that we don't permit extended security
2087 if ((vp
->v_type
!= VDIR
) && (vp
->v_type
!= VLNK
) && (vp
->v_type
!= VREG
)) {
2088 VFS_DEBUG(ctx
, vp
, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp
));
2094 * If we don't have all the extended security items, we need
2095 * to fetch the existing data to perform a read-modify-write
2099 if (!VATTR_IS_ACTIVE(vap
, va_acl
) ||
2100 !VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
2101 !VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2102 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0) {
2103 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error
);
2107 /* if we didn't get a filesec, use our local one */
2109 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2112 KAUTH_DEBUG("SETATTR - updating existing filesec");
2115 facl
= &fsec
->fsec_acl
;
2117 /* if we're using the local filesec, we need to initialise it */
2118 if (fsec
== &lfsec
) {
2119 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
2120 fsec
->fsec_owner
= kauth_null_guid
;
2121 fsec
->fsec_group
= kauth_null_guid
;
2122 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2123 facl
->acl_flags
= 0;
2127 * Update with the supplied attributes.
2129 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
2130 KAUTH_DEBUG("SETATTR - updating owner UUID");
2131 fsec
->fsec_owner
= vap
->va_uuuid
;
2132 VATTR_SET_SUPPORTED(vap
, va_uuuid
);
2134 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2135 KAUTH_DEBUG("SETATTR - updating group UUID");
2136 fsec
->fsec_group
= vap
->va_guuid
;
2137 VATTR_SET_SUPPORTED(vap
, va_guuid
);
2139 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2140 if (vap
->va_acl
== NULL
) {
2141 KAUTH_DEBUG("SETATTR - removing ACL");
2142 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2144 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap
->va_acl
->acl_entrycount
);
2147 VATTR_SET_SUPPORTED(vap
, va_acl
);
2151 * If the filesec data is all invalid, we can just remove
2152 * the EA completely.
2154 if ((facl
->acl_entrycount
== KAUTH_FILESEC_NOACL
) &&
2155 kauth_guid_equal(&fsec
->fsec_owner
, &kauth_null_guid
) &&
2156 kauth_guid_equal(&fsec
->fsec_group
, &kauth_null_guid
)) {
2157 error
= vn_removexattr(vp
, KAUTH_FILESEC_XATTR
, XATTR_NOSECURITY
, ctx
);
2158 /* no attribute is ok, nothing to delete */
2159 if (error
== ENOATTR
)
2161 VFS_DEBUG(ctx
, vp
, "SETATTR - remove filesec returning %d", error
);
2164 error
= vnode_set_filesec(vp
, fsec
, facl
, ctx
);
2165 VFS_DEBUG(ctx
, vp
, "SETATTR - update filesec returning %d", error
);
2168 /* if we fetched a filesec, dispose of the buffer */
2170 kauth_filesec_free(fsec
);
2178 * Definition of vnode operations.
2184 *#% lookup dvp L ? ?
2185 *#% lookup vpp - L -
/*
 * NOTE(review): fragmentary extraction — a_dvp/a_vpp struct members,
 * _err/thread_safe declarations, the thread_safe branches and the return
 * are not visible. VNOP_LOOKUP bypass: builds a vnop_lookup_args,
 * pre-caches credentials on the directory, takes the fsnode lock/funnel
 * for non-thread-safe filesystems, and dispatches through
 * dvp->v_op[vnop_lookup_desc.vdesc_offset]. Special case: for the last
 * component with LOCKPARENT, the fsnode lock is deliberately left held
 * (flagged via FSNODELOCKHELD in cn_flags) for the remainder of the
 * syscall; only the funnel is restored here.
 */
2187 struct vnop_lookup_args
{
2188 struct vnodeop_desc
*a_desc
;
2191 struct componentname
*a_cnp
;
2192 vfs_context_t a_context
;
2197 VNOP_LOOKUP(vnode_t dvp
, vnode_t
*vpp
, struct componentname
*cnp
, vfs_context_t context
)
2200 struct vnop_lookup_args a
;
2203 int funnel_state
= 0;
2205 a
.a_desc
= &vnop_lookup_desc
;
2209 a
.a_context
= context
;
2210 thread_safe
= THREAD_SAFE_FS(dvp
);
2212 vnode_cache_credentials(dvp
, context
);
2215 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2219 _err
= (*dvp
->v_op
[vnop_lookup_desc
.vdesc_offset
])(&a
);
2224 if ( (cnp
->cn_flags
& ISLASTCN
) ) {
2225 if ( (cnp
->cn_flags
& LOCKPARENT
) ) {
2226 if ( !(cnp
->cn_flags
& FSNODELOCKHELD
) ) {
2228 * leave the fsnode lock held on
2229 * the directory, but restore the funnel...
2230 * also indicate that we need to drop the
2231 * fsnode_lock when we're done with the
2232 * system call processing for this path
2234 cnp
->cn_flags
|= FSNODELOCKHELD
;
2236 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2241 unlock_fsnode(dvp
, &funnel_state
);
2249 *#% create dvp L L L
2250 *#% create vpp - L -
/*
 * NOTE(review): fragmentary extraction — a_dvp/a_vpp members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_CREATE bypass: locks the fsnode for non-thread-safe filesystems,
 * dispatches through dvp->v_op[vnop_create_desc.vdesc_offset], and on
 * success removes any stale AppleDouble "._" file when the FS does not
 * store xattrs natively (NATIVE_XATTR false).
 */
2254 struct vnop_create_args
{
2255 struct vnodeop_desc
*a_desc
;
2258 struct componentname
*a_cnp
;
2259 struct vnode_attr
*a_vap
;
2260 vfs_context_t a_context
;
2264 VNOP_CREATE(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t context
)
2267 struct vnop_create_args a
;
2269 int funnel_state
= 0;
2271 a
.a_desc
= &vnop_create_desc
;
2276 a
.a_context
= context
;
2277 thread_safe
= THREAD_SAFE_FS(dvp
);
2280 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2284 _err
= (*dvp
->v_op
[vnop_create_desc
.vdesc_offset
])(&a
);
2285 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
2287 * Remove stale Apple Double file (if any).
2289 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
2292 unlock_fsnode(dvp
, &funnel_state
);
2300 *#% whiteout dvp L L L
2301 *#% whiteout cnp - - -
2302 *#% whiteout flag - - -
/*
 * NOTE(review): fragmentary extraction — a_dvp/a_flags members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_WHITEOUT bypass: locks the fsnode for non-thread-safe filesystems
 * and dispatches through dvp->v_op[vnop_whiteout_desc.vdesc_offset].
 */
2305 struct vnop_whiteout_args
{
2306 struct vnodeop_desc
*a_desc
;
2308 struct componentname
*a_cnp
;
2310 vfs_context_t a_context
;
2314 VNOP_WHITEOUT(vnode_t dvp
, struct componentname
* cnp
, int flags
, vfs_context_t context
)
2317 struct vnop_whiteout_args a
;
2319 int funnel_state
= 0;
2321 a
.a_desc
= &vnop_whiteout_desc
;
2325 a
.a_context
= context
;
2326 thread_safe
= THREAD_SAFE_FS(dvp
);
2329 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2333 _err
= (*dvp
->v_op
[vnop_whiteout_desc
.vdesc_offset
])(&a
);
2335 unlock_fsnode(dvp
, &funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_dvp/a_vpp members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_MKNOD bypass: locks the fsnode for non-thread-safe filesystems
 * and dispatches through dvp->v_op[vnop_mknod_desc.vdesc_offset].
 */
2347 struct vnop_mknod_args
{
2348 struct vnodeop_desc
*a_desc
;
2351 struct componentname
*a_cnp
;
2352 struct vnode_attr
*a_vap
;
2353 vfs_context_t a_context
;
2357 VNOP_MKNOD(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t context
)
2361 struct vnop_mknod_args a
;
2363 int funnel_state
= 0;
2365 a
.a_desc
= &vnop_mknod_desc
;
2370 a
.a_context
= context
;
2371 thread_safe
= THREAD_SAFE_FS(dvp
);
2374 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2378 _err
= (*dvp
->v_op
[vnop_mknod_desc
.vdesc_offset
])(&a
);
2380 unlock_fsnode(dvp
, &funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_vp/a_mode members, the
 * _err/thread_safe declarations, the thread_safe conditionals and the
 * return are not visible. VNOP_OPEN bypass: synthesizes a vfs_context
 * from the current proc/cred when the caller passed NULL, then for
 * non-thread-safe filesystems takes the kernel funnel and — except for
 * VCHR/VFIFO/VSOCK vnodes, which must not block under the fsnode lock —
 * the fsnode lock, around the dispatch through
 * vp->v_op[vnop_open_desc.vdesc_offset].
 */
2391 struct vnop_open_args
{
2392 struct vnodeop_desc
*a_desc
;
2395 vfs_context_t a_context
;
2399 VNOP_OPEN(vnode_t vp
, int mode
, vfs_context_t context
)
2402 struct vnop_open_args a
;
2404 int funnel_state
= 0;
2405 struct vfs_context acontext
;
2407 if (context
== NULL
) {
2408 acontext
.vc_proc
= current_proc();
2409 acontext
.vc_ucred
= kauth_cred_get();
2410 context
= &acontext
;
2412 a
.a_desc
= &vnop_open_desc
;
2415 a
.a_context
= context
;
2416 thread_safe
= THREAD_SAFE_FS(vp
);
2419 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2420 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2421 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2422 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2427 _err
= (*vp
->v_op
[vnop_open_desc
.vdesc_offset
])(&a
);
2429 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2430 unlock_fsnode(vp
, NULL
);
2432 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_vp/a_fflag members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_CLOSE bypass: mirrors VNOP_OPEN — synthesizes a vfs_context when
 * NULL, takes the funnel for non-thread-safe filesystems, and skips the
 * fsnode lock for VCHR/VFIFO/VSOCK vnodes around the dispatch through
 * vp->v_op[vnop_close_desc.vdesc_offset].
 */
2443 struct vnop_close_args
{
2444 struct vnodeop_desc
*a_desc
;
2447 vfs_context_t a_context
;
2451 VNOP_CLOSE(vnode_t vp
, int fflag
, vfs_context_t context
)
2454 struct vnop_close_args a
;
2456 int funnel_state
= 0;
2457 struct vfs_context acontext
;
2459 if (context
== NULL
) {
2460 acontext
.vc_proc
= current_proc();
2461 acontext
.vc_ucred
= kauth_cred_get();
2462 context
= &acontext
;
2464 a
.a_desc
= &vnop_close_desc
;
2467 a
.a_context
= context
;
2468 thread_safe
= THREAD_SAFE_FS(vp
);
2471 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2472 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2473 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2474 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2479 _err
= (*vp
->v_op
[vnop_close_desc
.vdesc_offset
])(&a
);
2481 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2482 unlock_fsnode(vp
, NULL
);
2484 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_vp member, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_ACCESS bypass: synthesizes a vfs_context when NULL, locks the
 * fsnode for non-thread-safe filesystems, and dispatches through
 * vp->v_op[vnop_access_desc.vdesc_offset].
 */
2495 struct vnop_access_args
{
2496 struct vnodeop_desc
*a_desc
;
2499 vfs_context_t a_context
;
2503 VNOP_ACCESS(vnode_t vp
, int action
, vfs_context_t context
)
2506 struct vnop_access_args a
;
2508 int funnel_state
= 0;
2509 struct vfs_context acontext
;
2511 if (context
== NULL
) {
2512 acontext
.vc_proc
= current_proc();
2513 acontext
.vc_ucred
= kauth_cred_get();
2514 context
= &acontext
;
2516 a
.a_desc
= &vnop_access_desc
;
2518 a
.a_action
= action
;
2519 a
.a_context
= context
;
2520 thread_safe
= THREAD_SAFE_FS(vp
);
2523 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2527 _err
= (*vp
->v_op
[vnop_access_desc
.vdesc_offset
])(&a
);
2529 unlock_fsnode(vp
, &funnel_state
);
2537 *#% getattr vp = = =
/*
 * NOTE(review): fragmentary extraction — a_vp member, the
 * _err/thread_safe/funnel_state declarations and the return are not
 * visible. VNOP_GETATTR bypass: locks the fsnode for non-thread-safe
 * filesystems and dispatches through
 * vp->v_op[vnop_getattr_desc.vdesc_offset].
 */
2540 struct vnop_getattr_args
{
2541 struct vnodeop_desc
*a_desc
;
2543 struct vnode_attr
*a_vap
;
2544 vfs_context_t a_context
;
2548 VNOP_GETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t context
)
2551 struct vnop_getattr_args a
;
2555 a
.a_desc
= &vnop_getattr_desc
;
2558 a
.a_context
= context
;
2559 thread_safe
= THREAD_SAFE_FS(vp
);
2562 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2566 _err
= (*vp
->v_op
[vnop_getattr_desc
.vdesc_offset
])(&a
);
2568 unlock_fsnode(vp
, &funnel_state
);
2576 *#% setattr vp L L L
/*
 * NOTE(review): fragmentary extraction — a_vp member, declarations for
 * _err/thread_safe/funnel_state/dvp/vname, VATTR_INIT of 'va', the
 * dvp/vname NULL checks and puts, and the return are not visible.
 * VNOP_SETATTR bypass: locks the fsnode for non-thread-safe filesystems,
 * dispatches through vp->v_op[vnop_setattr_desc.vdesc_offset], and on
 * success for non-native-xattr filesystems shadows any uid/gid/mode
 * change onto the AppleDouble xattr file by building a reduced
 * vnode_attr and calling xattrfile_setattr() with the vnode's parent
 * and name.
 */
2579 struct vnop_setattr_args
{
2580 struct vnodeop_desc
*a_desc
;
2582 struct vnode_attr
*a_vap
;
2583 vfs_context_t a_context
;
2587 VNOP_SETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t context
)
2590 struct vnop_setattr_args a
;
2594 a
.a_desc
= &vnop_setattr_desc
;
2597 a
.a_context
= context
;
2598 thread_safe
= THREAD_SAFE_FS(vp
);
2601 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2605 _err
= (*vp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
2608 * Shadow uid/gid/mod change to extended attibute file.
2610 if (_err
== 0 && !NATIVE_XATTR(vp
)) {
2611 struct vnode_attr va
;
2615 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2616 VATTR_SET(&va
, va_uid
, vap
->va_uid
);
2619 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2620 VATTR_SET(&va
, va_gid
, vap
->va_gid
);
2623 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
2624 VATTR_SET(&va
, va_mode
, vap
->va_mode
);
2631 dvp
= vnode_getparent(vp
);
2632 vname
= vnode_getname(vp
);
2634 xattrfile_setattr(dvp
, vname
, &va
, context
, thread_safe
);
2638 vnode_putname(vname
);
2642 unlock_fsnode(vp
, &funnel_state
);
2650 *#% getattrlist vp = = =
/*
 * NOTE(review): fragmentary extraction — a_vp/a_uio members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_GETATTRLIST bypass: locks the fsnode for non-thread-safe
 * filesystems and dispatches through
 * vp->v_op[vnop_getattrlist_desc.vdesc_offset].
 */
2653 struct vnop_getattrlist_args
{
2654 struct vnodeop_desc
*a_desc
;
2656 struct attrlist
*a_alist
;
2659 vfs_context_t a_context
;
2663 VNOP_GETATTRLIST(vnode_t vp
, struct attrlist
* alist
, struct uio
* uio
, int options
, vfs_context_t context
)
2666 struct vnop_getattrlist_args a
;
2668 int funnel_state
= 0;
2670 a
.a_desc
= &vnop_getattrlist_desc
;
2674 a
.a_options
= options
;
2675 a
.a_context
= context
;
2676 thread_safe
= THREAD_SAFE_FS(vp
);
2679 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2683 _err
= (*vp
->v_op
[vnop_getattrlist_desc
.vdesc_offset
])(&a
);
2685 unlock_fsnode(vp
, &funnel_state
);
2693 *#% setattrlist vp L L L
/*
 * NOTE(review): fragmentary extraction — a_vp/a_uio members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_SETATTRLIST bypass: locks the fsnode for non-thread-safe
 * filesystems, dispatches through
 * vp->v_op[vnop_setattrlist_desc.vdesc_offset], and then drops any
 * cached credentials on the vnode since the attributes (and thus
 * authorization results) may have changed.
 */
2696 struct vnop_setattrlist_args
{
2697 struct vnodeop_desc
*a_desc
;
2699 struct attrlist
*a_alist
;
2702 vfs_context_t a_context
;
2706 VNOP_SETATTRLIST(vnode_t vp
, struct attrlist
* alist
, struct uio
* uio
, int options
, vfs_context_t context
)
2709 struct vnop_setattrlist_args a
;
2711 int funnel_state
= 0;
2713 a
.a_desc
= &vnop_setattrlist_desc
;
2717 a
.a_options
= options
;
2718 a
.a_context
= context
;
2719 thread_safe
= THREAD_SAFE_FS(vp
);
2722 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2726 _err
= (*vp
->v_op
[vnop_setattrlist_desc
.vdesc_offset
])(&a
);
2728 vnode_uncache_credentials(vp
);
2731 unlock_fsnode(vp
, &funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_vp/a_uio members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_READ bypass: synthesizes a vfs_context when NULL, takes the
 * funnel for non-thread-safe filesystems, and skips the fsnode lock for
 * VCHR/VFIFO/VSOCK vnodes (which may block indefinitely) around the
 * dispatch through vp->v_op[vnop_read_desc.vdesc_offset].
 */
2743 struct vnop_read_args
{
2744 struct vnodeop_desc
*a_desc
;
2748 vfs_context_t a_context
;
2752 VNOP_READ(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t context
)
2755 struct vnop_read_args a
;
2757 int funnel_state
= 0;
2758 struct vfs_context acontext
;
2760 if (context
== NULL
) {
2761 acontext
.vc_proc
= current_proc();
2762 acontext
.vc_ucred
= kauth_cred_get();
2763 context
= &acontext
;
2766 a
.a_desc
= &vnop_read_desc
;
2769 a
.a_ioflag
= ioflag
;
2770 a
.a_context
= context
;
2771 thread_safe
= THREAD_SAFE_FS(vp
);
2774 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2775 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2776 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2777 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2782 _err
= (*vp
->v_op
[vnop_read_desc
.vdesc_offset
])(&a
);
2785 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2786 unlock_fsnode(vp
, NULL
);
2788 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_vp/a_uio members, the
 * _err/thread_safe declarations and the return are not visible.
 * VNOP_WRITE bypass: mirrors VNOP_READ — synthesizes a vfs_context when
 * NULL, takes the funnel for non-thread-safe filesystems, and skips the
 * fsnode lock for VCHR/VFIFO/VSOCK vnodes around the dispatch through
 * vp->v_op[vnop_write_desc.vdesc_offset].
 */
2800 struct vnop_write_args
{
2801 struct vnodeop_desc
*a_desc
;
2805 vfs_context_t a_context
;
2809 VNOP_WRITE(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t context
)
2811 struct vnop_write_args a
;
2814 int funnel_state
= 0;
2815 struct vfs_context acontext
;
2817 if (context
== NULL
) {
2818 acontext
.vc_proc
= current_proc();
2819 acontext
.vc_ucred
= kauth_cred_get();
2820 context
= &acontext
;
2823 a
.a_desc
= &vnop_write_desc
;
2826 a
.a_ioflag
= ioflag
;
2827 a
.a_context
= context
;
2828 thread_safe
= THREAD_SAFE_FS(vp
);
2831 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2832 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2833 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2834 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2839 _err
= (*vp
->v_op
[vnop_write_desc
.vdesc_offset
])(&a
);
2842 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2843 unlock_fsnode(vp
, NULL
);
2845 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_vp/a_data/a_fflag members,
 * the _err/thread_safe declarations, the ENOTTY-style error for the
 * 64-bit check and the return are not visible. VNOP_IOCTL bypass:
 * synthesizes a vfs_context when NULL, rejects 64-bit callers on
 * filesystems that are not 64-bit ready (vnode_vfs64bitready), then
 * takes the funnel for non-thread-safe filesystems, skipping the fsnode
 * lock for VCHR/VFIFO/VSOCK vnodes, around the dispatch through
 * vp->v_op[vnop_ioctl_desc.vdesc_offset].
 */
2857 struct vnop_ioctl_args
{
2858 struct vnodeop_desc
*a_desc
;
2863 vfs_context_t a_context
;
2867 VNOP_IOCTL(vnode_t vp
, u_long command
, caddr_t data
, int fflag
, vfs_context_t context
)
2870 struct vnop_ioctl_args a
;
2872 int funnel_state
= 0;
2873 struct vfs_context acontext
;
2875 if (context
== NULL
) {
2876 acontext
.vc_proc
= current_proc();
2877 acontext
.vc_ucred
= kauth_cred_get();
2878 context
= &acontext
;
2881 if (vfs_context_is64bit(context
)) {
2882 if (!vnode_vfs64bitready(vp
)) {
2887 a
.a_desc
= &vnop_ioctl_desc
;
2889 a
.a_command
= command
;
2892 a
.a_context
= context
;
2893 thread_safe
= THREAD_SAFE_FS(vp
);
2896 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2897 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2898 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2899 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2904 _err
= (*vp
->v_op
[vnop_ioctl_desc
.vdesc_offset
])(&a
);
2906 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2907 unlock_fsnode(vp
, NULL
);
2909 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * NOTE(review): fragmentary extraction — a_vp/a_which/a_wql members,
 * the _err/thread_safe declarations and the return are not visible.
 * VNOP_SELECT bypass: synthesizes a vfs_context when NULL, takes the
 * funnel for non-thread-safe filesystems, skipping the fsnode lock for
 * VCHR/VFIFO/VSOCK vnodes, around the dispatch through
 * vp->v_op[vnop_select_desc.vdesc_offset].
 */
2921 struct vnop_select_args
{
2922 struct vnodeop_desc
*a_desc
;
2927 vfs_context_t a_context
;
2931 VNOP_SELECT(vnode_t vp
, int which
, int fflags
, void * wql
, vfs_context_t context
)
2934 struct vnop_select_args a
;
2936 int funnel_state
= 0;
2937 struct vfs_context acontext
;
2939 if (context
== NULL
) {
2940 acontext
.vc_proc
= current_proc();
2941 acontext
.vc_ucred
= kauth_cred_get();
2942 context
= &acontext
;
2944 a
.a_desc
= &vnop_select_desc
;
2947 a
.a_fflags
= fflags
;
2948 a
.a_context
= context
;
2950 thread_safe
= THREAD_SAFE_FS(vp
);
2953 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2954 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2955 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2956 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2961 _err
= (*vp
->v_op
[vnop_select_desc
.vdesc_offset
])(&a
);
2963 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2964 unlock_fsnode(vp
, NULL
);
2966 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2975 *#% exchange fvp L L L
2976 *#% exchange tvp L L L
/*
 * NOTE(review): fragmentary extraction — a_fvp/a_tvp members, the
 * _err/thread_safe declarations, the code that orders lock_first /
 * lock_second by vnode address, and the return are not visible.
 * VNOP_EXCHANGE bypass: for non-thread-safe filesystems, locks the two
 * fsnodes in vnode-address order (per the visible comment, to avoid
 * deadlocks), dispatches through
 * fvp->v_op[vnop_exchange_desc.vdesc_offset], then unlocks in reverse
 * order.
 */
2979 struct vnop_exchange_args
{
2980 struct vnodeop_desc
*a_desc
;
2984 vfs_context_t a_context
;
2988 VNOP_EXCHANGE(vnode_t fvp
, vnode_t tvp
, int options
, vfs_context_t context
)
2991 struct vnop_exchange_args a
;
2993 int funnel_state
= 0;
2994 vnode_t lock_first
= NULL
, lock_second
= NULL
;
2996 a
.a_desc
= &vnop_exchange_desc
;
2999 a
.a_options
= options
;
3000 a
.a_context
= context
;
3001 thread_safe
= THREAD_SAFE_FS(fvp
);
3005 * Lock in vnode address order to avoid deadlocks
3014 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) ) {
3017 if ( (_err
= lock_fsnode(lock_second
, NULL
)) ) {
3018 unlock_fsnode(lock_first
, &funnel_state
);
3022 _err
= (*fvp
->v_op
[vnop_exchange_desc
.vdesc_offset
])(&a
);
3024 unlock_fsnode(lock_second
, NULL
);
3025 unlock_fsnode(lock_first
, &funnel_state
);
3037 struct vnop_revoke_args
{
3038 struct vnodeop_desc
*a_desc
;
3041 vfs_context_t a_context
;
3045 VNOP_REVOKE(vnode_t vp
, int flags
, vfs_context_t context
)
3047 struct vnop_revoke_args a
;
3050 int funnel_state
= 0;
3052 a
.a_desc
= &vnop_revoke_desc
;
3055 a
.a_context
= context
;
3056 thread_safe
= THREAD_SAFE_FS(vp
);
3059 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3061 _err
= (*vp
->v_op
[vnop_revoke_desc
.vdesc_offset
])(&a
);
3063 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3075 struct vnop_mmap_args
{
3076 struct vnodeop_desc
*a_desc
;
3079 vfs_context_t a_context
;
3083 VNOP_MMAP(vnode_t vp
, int fflags
, vfs_context_t context
)
3086 struct vnop_mmap_args a
;
3088 int funnel_state
= 0;
3090 a
.a_desc
= &vnop_mmap_desc
;
3092 a
.a_fflags
= fflags
;
3093 a
.a_context
= context
;
3094 thread_safe
= THREAD_SAFE_FS(vp
);
3097 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3101 _err
= (*vp
->v_op
[vnop_mmap_desc
.vdesc_offset
])(&a
);
3103 unlock_fsnode(vp
, &funnel_state
);
3112 *# mnomap - vp U U U
3115 struct vnop_mnomap_args
{
3116 struct vnodeop_desc
*a_desc
;
3118 vfs_context_t a_context
;
3122 VNOP_MNOMAP(vnode_t vp
, vfs_context_t context
)
3125 struct vnop_mnomap_args a
;
3127 int funnel_state
= 0;
3129 a
.a_desc
= &vnop_mnomap_desc
;
3131 a
.a_context
= context
;
3132 thread_safe
= THREAD_SAFE_FS(vp
);
3135 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3139 _err
= (*vp
->v_op
[vnop_mnomap_desc
.vdesc_offset
])(&a
);
3141 unlock_fsnode(vp
, &funnel_state
);
3153 struct vnop_fsync_args
{
3154 struct vnodeop_desc
*a_desc
;
3157 vfs_context_t a_context
;
3161 VNOP_FSYNC(vnode_t vp
, int waitfor
, vfs_context_t context
)
3163 struct vnop_fsync_args a
;
3166 int funnel_state
= 0;
3168 a
.a_desc
= &vnop_fsync_desc
;
3170 a
.a_waitfor
= waitfor
;
3171 a
.a_context
= context
;
3172 thread_safe
= THREAD_SAFE_FS(vp
);
3175 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3179 _err
= (*vp
->v_op
[vnop_fsync_desc
.vdesc_offset
])(&a
);
3181 unlock_fsnode(vp
, &funnel_state
);
3190 *#% remove dvp L U U
3194 struct vnop_remove_args
{
3195 struct vnodeop_desc
*a_desc
;
3198 struct componentname
*a_cnp
;
3200 vfs_context_t a_context
;
3204 VNOP_REMOVE(vnode_t dvp
, vnode_t vp
, struct componentname
* cnp
, int flags
, vfs_context_t context
)
3207 struct vnop_remove_args a
;
3209 int funnel_state
= 0;
3211 a
.a_desc
= &vnop_remove_desc
;
3216 a
.a_context
= context
;
3217 thread_safe
= THREAD_SAFE_FS(dvp
);
3220 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3224 _err
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3227 vnode_setneedinactive(vp
);
3229 if ( !(NATIVE_XATTR(dvp
)) ) {
3231 * Remove any associated extended attibute file (._ AppleDouble file).
3233 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 1);
3237 unlock_fsnode(vp
, &funnel_state
);
3250 struct vnop_link_args
{
3251 struct vnodeop_desc
*a_desc
;
3254 struct componentname
*a_cnp
;
3255 vfs_context_t a_context
;
3259 VNOP_LINK(vnode_t vp
, vnode_t tdvp
, struct componentname
* cnp
, vfs_context_t context
)
3262 struct vnop_link_args a
;
3264 int funnel_state
= 0;
3267 * For file systems with non-native extended attributes,
3268 * disallow linking to an existing "._" Apple Double file.
3270 if ( !NATIVE_XATTR(tdvp
) && (vp
->v_type
== VREG
)) {
3273 vname
= vnode_getname(vp
);
3274 if (vname
!= NULL
) {
3276 if (vname
[0] == '.' && vname
[1] == '_' && vname
[2] != '\0') {
3279 vnode_putname(vname
);
3284 a
.a_desc
= &vnop_link_desc
;
3288 a
.a_context
= context
;
3289 thread_safe
= THREAD_SAFE_FS(vp
);
3292 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3296 _err
= (*tdvp
->v_op
[vnop_link_desc
.vdesc_offset
])(&a
);
3298 unlock_fsnode(vp
, &funnel_state
);
3307 *#% rename fdvp U U U
3308 *#% rename fvp U U U
3309 *#% rename tdvp L U U
3310 *#% rename tvp X U U
3313 struct vnop_rename_args
{
3314 struct vnodeop_desc
*a_desc
;
3317 struct componentname
*a_fcnp
;
3320 struct componentname
*a_tcnp
;
3321 vfs_context_t a_context
;
3325 VNOP_RENAME(struct vnode
*fdvp
, struct vnode
*fvp
, struct componentname
*fcnp
,
3326 struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
3327 vfs_context_t context
)
3330 struct vnop_rename_args a
;
3331 int funnel_state
= 0;
3332 char smallname1
[48];
3333 char smallname2
[48];
3334 char *xfromname
= NULL
;
3335 char *xtoname
= NULL
;
3336 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3337 vnode_t fdvp_unsafe
= NULLVP
;
3338 vnode_t tdvp_unsafe
= NULLVP
;
3340 a
.a_desc
= &vnop_rename_desc
;
3347 a
.a_context
= context
;
3349 if (!THREAD_SAFE_FS(fdvp
))
3351 if (!THREAD_SAFE_FS(tdvp
))
3354 if (fdvp_unsafe
!= NULLVP
) {
3356 * Lock parents in vnode address order to avoid deadlocks
3357 * note that it's possible for the fdvp to be unsafe,
3358 * but the tdvp to be safe because tvp could be a directory
3359 * in the root of a filesystem... in that case, tdvp is the
3360 * in the filesystem that this root is mounted on
3362 if (tdvp_unsafe
== NULL
|| fdvp_unsafe
== tdvp_unsafe
) {
3363 lock_first
= fdvp_unsafe
;
3365 } else if (fdvp_unsafe
< tdvp_unsafe
) {
3366 lock_first
= fdvp_unsafe
;
3367 lock_second
= tdvp_unsafe
;
3369 lock_first
= tdvp_unsafe
;
3370 lock_second
= fdvp_unsafe
;
3372 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) )
3375 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3376 unlock_fsnode(lock_first
, &funnel_state
);
3381 * Lock both children in vnode address order to avoid deadlocks
3383 if (tvp
== NULL
|| tvp
== fvp
) {
3386 } else if (fvp
< tvp
) {
3393 if ( (_err
= lock_fsnode(lock_first
, NULL
)) )
3396 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3397 unlock_fsnode(lock_first
, NULL
);
3402 * Save source and destination names (._ AppleDouble files).
3403 * Skip if source already has a "._" prefix.
3405 if (!NATIVE_XATTR(fdvp
) &&
3406 !(fcnp
->cn_nameptr
[0] == '.' && fcnp
->cn_nameptr
[1] == '_')) {
3409 /* Get source attribute file name. */
3410 len
= fcnp
->cn_namelen
+ 3;
3411 if (len
> sizeof(smallname1
)) {
3412 MALLOC(xfromname
, char *, len
, M_TEMP
, M_WAITOK
);
3414 xfromname
= &smallname1
[0];
3416 strcpy(xfromname
, "._");
3417 strncat(xfromname
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
);
3418 xfromname
[len
-1] = '\0';
3420 /* Get destination attribute file name. */
3421 len
= tcnp
->cn_namelen
+ 3;
3422 if (len
> sizeof(smallname2
)) {
3423 MALLOC(xtoname
, char *, len
, M_TEMP
, M_WAITOK
);
3425 xtoname
= &smallname2
[0];
3427 strcpy(xtoname
, "._");
3428 strncat(xtoname
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
);
3429 xtoname
[len
-1] = '\0';
3432 _err
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3434 if (fdvp_unsafe
!= NULLVP
) {
3435 if (lock_second
!= NULL
)
3436 unlock_fsnode(lock_second
, NULL
);
3437 unlock_fsnode(lock_first
, NULL
);
3440 if (tvp
&& tvp
!= fvp
)
3441 vnode_setneedinactive(tvp
);
3445 * Rename any associated extended attibute file (._ AppleDouble file).
3447 if (_err
== 0 && !NATIVE_XATTR(fdvp
) && xfromname
!= NULL
) {
3448 struct nameidata fromnd
, tond
;
3453 * Get source attribute file vnode.
3454 * Note that fdvp already has an iocount reference and
3455 * using DELETE will take an additional reference.
3457 NDINIT(&fromnd
, DELETE
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3458 CAST_USER_ADDR_T(xfromname
), context
);
3459 fromnd
.ni_dvp
= fdvp
;
3460 error
= namei(&fromnd
);
3463 /* When source doesn't exist there still may be a destination. */
3464 if (error
== ENOENT
) {
3469 } else if (fromnd
.ni_vp
->v_type
!= VREG
) {
3470 vnode_put(fromnd
.ni_vp
);
3475 struct vnop_remove_args args
;
3478 * Get destination attribute file vnode.
3479 * Note that tdvp already has an iocount reference.
3481 NDINIT(&tond
, DELETE
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3482 CAST_USER_ADDR_T(xtoname
), context
);
3484 error
= namei(&tond
);
3488 if (tond
.ni_vp
->v_type
!= VREG
) {
3489 vnode_put(tond
.ni_vp
);
3493 args
.a_desc
= &vnop_remove_desc
;
3495 args
.a_vp
= tond
.ni_vp
;
3496 args
.a_cnp
= &tond
.ni_cnd
;
3497 args
.a_context
= context
;
3499 if (fdvp_unsafe
!= NULLVP
)
3500 error
= lock_fsnode(tond
.ni_vp
, NULL
);
3502 error
= (*tdvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&args
);
3504 if (fdvp_unsafe
!= NULLVP
)
3505 unlock_fsnode(tond
.ni_vp
, NULL
);
3508 vnode_setneedinactive(tond
.ni_vp
);
3510 vnode_put(tond
.ni_vp
);
3516 * Get destination attribute file vnode.
3518 NDINIT(&tond
, RENAME
,
3519 NOCACHE
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3520 CAST_USER_ADDR_T(xtoname
), context
);
3522 error
= namei(&tond
);
3525 vnode_put(fromnd
.ni_vp
);
3529 a
.a_desc
= &vnop_rename_desc
;
3531 a
.a_fvp
= fromnd
.ni_vp
;
3532 a
.a_fcnp
= &fromnd
.ni_cnd
;
3534 a
.a_tvp
= tond
.ni_vp
;
3535 a
.a_tcnp
= &tond
.ni_cnd
;
3536 a
.a_context
= context
;
3538 if (fdvp_unsafe
!= NULLVP
) {
3540 * Lock in vnode address order to avoid deadlocks
3542 if (tond
.ni_vp
== NULL
|| tond
.ni_vp
== fromnd
.ni_vp
) {
3543 lock_first
= fromnd
.ni_vp
;
3545 } else if (fromnd
.ni_vp
< tond
.ni_vp
) {
3546 lock_first
= fromnd
.ni_vp
;
3547 lock_second
= tond
.ni_vp
;
3549 lock_first
= tond
.ni_vp
;
3550 lock_second
= fromnd
.ni_vp
;
3552 if ( (error
= lock_fsnode(lock_first
, NULL
)) == 0) {
3553 if (lock_second
!= NULL
&& (error
= lock_fsnode(lock_second
, NULL
)) )
3554 unlock_fsnode(lock_first
, NULL
);
3558 error
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3560 if (fdvp_unsafe
!= NULLVP
) {
3561 if (lock_second
!= NULL
)
3562 unlock_fsnode(lock_second
, NULL
);
3563 unlock_fsnode(lock_first
, NULL
);
3566 vnode_setneedinactive(fromnd
.ni_vp
);
3568 if (tond
.ni_vp
&& tond
.ni_vp
!= fromnd
.ni_vp
)
3569 vnode_setneedinactive(tond
.ni_vp
);
3572 vnode_put(fromnd
.ni_vp
);
3574 vnode_put(tond
.ni_vp
);
3580 if (xfromname
&& xfromname
!= &smallname1
[0]) {
3581 FREE(xfromname
, M_TEMP
);
3583 if (xtoname
&& xtoname
!= &smallname2
[0]) {
3584 FREE(xtoname
, M_TEMP
);
3587 if (fdvp_unsafe
!= NULLVP
) {
3588 if (tdvp_unsafe
!= NULLVP
)
3589 unlock_fsnode(tdvp_unsafe
, NULL
);
3590 unlock_fsnode(fdvp_unsafe
, &funnel_state
);
3602 struct vnop_mkdir_args
{
3603 struct vnodeop_desc
*a_desc
;
3606 struct componentname
*a_cnp
;
3607 struct vnode_attr
*a_vap
;
3608 vfs_context_t a_context
;
3612 VNOP_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3613 struct vnode_attr
*vap
, vfs_context_t context
)
3616 struct vnop_mkdir_args a
;
3618 int funnel_state
= 0;
3620 a
.a_desc
= &vnop_mkdir_desc
;
3625 a
.a_context
= context
;
3626 thread_safe
= THREAD_SAFE_FS(dvp
);
3629 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3633 _err
= (*dvp
->v_op
[vnop_mkdir_desc
.vdesc_offset
])(&a
);
3634 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3636 * Remove stale Apple Double file (if any).
3638 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
3641 unlock_fsnode(dvp
, &funnel_state
);
3654 struct vnop_rmdir_args
{
3655 struct vnodeop_desc
*a_desc
;
3658 struct componentname
*a_cnp
;
3659 vfs_context_t a_context
;
3664 VNOP_RMDIR(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
, vfs_context_t context
)
3667 struct vnop_rmdir_args a
;
3669 int funnel_state
= 0;
3671 a
.a_desc
= &vnop_rmdir_desc
;
3675 a
.a_context
= context
;
3676 thread_safe
= THREAD_SAFE_FS(dvp
);
3679 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3683 _err
= (*vp
->v_op
[vnop_rmdir_desc
.vdesc_offset
])(&a
);
3686 vnode_setneedinactive(vp
);
3688 if ( !(NATIVE_XATTR(dvp
)) ) {
3690 * Remove any associated extended attibute file (._ AppleDouble file).
3692 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 1);
3696 unlock_fsnode(vp
, &funnel_state
);
3702 * Remove a ._ AppleDouble file
3704 #define AD_STALE_SECS (180)
3706 xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t context
, int thread_safe
, int force
) {
3708 struct nameidata nd
;
3710 char *filename
= NULL
;
3713 if ((basename
== NULL
) || (basename
[0] == '\0') ||
3714 (basename
[0] == '.' && basename
[1] == '_')) {
3717 filename
= &smallname
[0];
3718 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
3719 if (len
>= sizeof(smallname
)) {
3720 len
++; /* snprintf result doesn't include '\0' */
3721 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
3722 len
= snprintf(filename
, len
, "._%s", basename
);
3724 NDINIT(&nd
, DELETE
, LOCKLEAF
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3725 CAST_USER_ADDR_T(filename
), context
);
3727 if (namei(&nd
) != 0)
3732 if (xvp
->v_type
!= VREG
)
3736 * When creating a new object and a "._" file already
3737 * exists, check to see if its a stale "._" file.
3741 struct vnode_attr va
;
3744 VATTR_WANTED(&va
, va_data_size
);
3745 VATTR_WANTED(&va
, va_modify_time
);
3746 if (VNOP_GETATTR(xvp
, &va
, context
) == 0 &&
3747 VATTR_IS_SUPPORTED(&va
, va_data_size
) &&
3748 VATTR_IS_SUPPORTED(&va
, va_modify_time
) &&
3749 va
.va_data_size
!= 0) {
3753 if ((tv
.tv_sec
> va
.va_modify_time
.tv_sec
) &&
3754 (tv
.tv_sec
- va
.va_modify_time
.tv_sec
) > AD_STALE_SECS
) {
3755 force
= 1; /* must be stale */
3760 struct vnop_remove_args a
;
3763 a
.a_desc
= &vnop_remove_desc
;
3764 a
.a_dvp
= nd
.ni_dvp
;
3766 a
.a_cnp
= &nd
.ni_cnd
;
3767 a
.a_context
= context
;
3770 if ( (lock_fsnode(xvp
, NULL
)) )
3773 error
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3776 unlock_fsnode(xvp
, NULL
);
3779 vnode_setneedinactive(xvp
);
3782 /* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */
3785 if (filename
&& filename
!= &smallname
[0]) {
3786 FREE(filename
, M_TEMP
);
3791 * Shadow uid/gid/mod to a ._ AppleDouble file
3794 xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
3795 vfs_context_t context
, int thread_safe
) {
3797 struct nameidata nd
;
3799 char *filename
= NULL
;
3802 if ((dvp
== NULLVP
) ||
3803 (basename
== NULL
) || (basename
[0] == '\0') ||
3804 (basename
[0] == '.' && basename
[1] == '_')) {
3807 filename
= &smallname
[0];
3808 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
3809 if (len
>= sizeof(smallname
)) {
3810 len
++; /* snprintf result doesn't include '\0' */
3811 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
3812 len
= snprintf(filename
, len
, "._%s", basename
);
3814 NDINIT(&nd
, LOOKUP
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3815 CAST_USER_ADDR_T(filename
), context
);
3817 if (namei(&nd
) != 0)
3823 if (xvp
->v_type
== VREG
) {
3824 struct vnop_setattr_args a
;
3826 a
.a_desc
= &vnop_setattr_desc
;
3829 a
.a_context
= context
;
3832 if ( (lock_fsnode(xvp
, NULL
)) )
3835 (void) (*xvp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
3837 unlock_fsnode(xvp
, NULL
);
3843 if (filename
&& filename
!= &smallname
[0]) {
3844 FREE(filename
, M_TEMP
);
3851 *#% symlink dvp L U U
3852 *#% symlink vpp - U -
3855 struct vnop_symlink_args
{
3856 struct vnodeop_desc
*a_desc
;
3859 struct componentname
*a_cnp
;
3860 struct vnode_attr
*a_vap
;
3862 vfs_context_t a_context
;
3867 VNOP_SYMLINK(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3868 struct vnode_attr
*vap
, char *target
, vfs_context_t context
)
3871 struct vnop_symlink_args a
;
3873 int funnel_state
= 0;
3875 a
.a_desc
= &vnop_symlink_desc
;
3880 a
.a_target
= target
;
3881 a
.a_context
= context
;
3882 thread_safe
= THREAD_SAFE_FS(dvp
);
3885 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3889 _err
= (*dvp
->v_op
[vnop_symlink_desc
.vdesc_offset
])(&a
);
3890 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3892 * Remove stale Apple Double file (if any).
3894 xattrfile_remove(dvp
, cnp
->cn_nameptr
, context
, thread_safe
, 0);
3897 unlock_fsnode(dvp
, &funnel_state
);
3905 *#% readdir vp L L L
3908 struct vnop_readdir_args
{
3909 struct vnodeop_desc
*a_desc
;
3915 vfs_context_t a_context
;
3920 VNOP_READDIR(struct vnode
*vp
, struct uio
*uio
, int flags
, int *eofflag
,
3921 int *numdirent
, vfs_context_t context
)
3924 struct vnop_readdir_args a
;
3926 int funnel_state
= 0;
3928 a
.a_desc
= &vnop_readdir_desc
;
3932 a
.a_eofflag
= eofflag
;
3933 a
.a_numdirent
= numdirent
;
3934 a
.a_context
= context
;
3935 thread_safe
= THREAD_SAFE_FS(vp
);
3938 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3942 _err
= (*vp
->v_op
[vnop_readdir_desc
.vdesc_offset
])(&a
);
3944 unlock_fsnode(vp
, &funnel_state
);
3952 *#% readdirattr vp L L L
3955 struct vnop_readdirattr_args
{
3956 struct vnodeop_desc
*a_desc
;
3958 struct attrlist
*a_alist
;
3964 u_long
*a_actualcount
;
3965 vfs_context_t a_context
;
3970 VNOP_READDIRATTR(struct vnode
*vp
, struct attrlist
*alist
, struct uio
*uio
, u_long maxcount
,
3971 u_long options
, u_long
*newstate
, int *eofflag
, u_long
*actualcount
, vfs_context_t context
)
3974 struct vnop_readdirattr_args a
;
3976 int funnel_state
= 0;
3978 a
.a_desc
= &vnop_readdirattr_desc
;
3982 a
.a_maxcount
= maxcount
;
3983 a
.a_options
= options
;
3984 a
.a_newstate
= newstate
;
3985 a
.a_eofflag
= eofflag
;
3986 a
.a_actualcount
= actualcount
;
3987 a
.a_context
= context
;
3988 thread_safe
= THREAD_SAFE_FS(vp
);
3991 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3995 _err
= (*vp
->v_op
[vnop_readdirattr_desc
.vdesc_offset
])(&a
);
3997 unlock_fsnode(vp
, &funnel_state
);
4005 *#% readlink vp L L L
4008 struct vnop_readlink_args
{
4009 struct vnodeop_desc
*a_desc
;
4012 vfs_context_t a_context
;
4017 VNOP_READLINK(struct vnode
*vp
, struct uio
*uio
, vfs_context_t context
)
4020 struct vnop_readlink_args a
;
4022 int funnel_state
= 0;
4024 a
.a_desc
= &vnop_readlink_desc
;
4027 a
.a_context
= context
;
4028 thread_safe
= THREAD_SAFE_FS(vp
);
4031 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4035 _err
= (*vp
->v_op
[vnop_readlink_desc
.vdesc_offset
])(&a
);
4037 unlock_fsnode(vp
, &funnel_state
);
4045 *#% inactive vp L U U
4048 struct vnop_inactive_args
{
4049 struct vnodeop_desc
*a_desc
;
4051 vfs_context_t a_context
;
4055 VNOP_INACTIVE(struct vnode
*vp
, vfs_context_t context
)
4058 struct vnop_inactive_args a
;
4060 int funnel_state
= 0;
4062 a
.a_desc
= &vnop_inactive_desc
;
4064 a
.a_context
= context
;
4065 thread_safe
= THREAD_SAFE_FS(vp
);
4068 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4072 _err
= (*vp
->v_op
[vnop_inactive_desc
.vdesc_offset
])(&a
);
4074 unlock_fsnode(vp
, &funnel_state
);
4083 *#% reclaim vp U U U
4086 struct vnop_reclaim_args
{
4087 struct vnodeop_desc
*a_desc
;
4089 vfs_context_t a_context
;
4093 VNOP_RECLAIM(struct vnode
*vp
, vfs_context_t context
)
4096 struct vnop_reclaim_args a
;
4098 int funnel_state
= 0;
4100 a
.a_desc
= &vnop_reclaim_desc
;
4102 a
.a_context
= context
;
4103 thread_safe
= THREAD_SAFE_FS(vp
);
4106 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4108 _err
= (*vp
->v_op
[vnop_reclaim_desc
.vdesc_offset
])(&a
);
4110 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4119 *#% pathconf vp L L L
4122 struct vnop_pathconf_args
{
4123 struct vnodeop_desc
*a_desc
;
4126 register_t
*a_retval
;
4127 vfs_context_t a_context
;
4131 VNOP_PATHCONF(struct vnode
*vp
, int name
, register_t
*retval
, vfs_context_t context
)
4134 struct vnop_pathconf_args a
;
4136 int funnel_state
= 0;
4138 a
.a_desc
= &vnop_pathconf_desc
;
4141 a
.a_retval
= retval
;
4142 a
.a_context
= context
;
4143 thread_safe
= THREAD_SAFE_FS(vp
);
4146 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4150 _err
= (*vp
->v_op
[vnop_pathconf_desc
.vdesc_offset
])(&a
);
4152 unlock_fsnode(vp
, &funnel_state
);
4160 *#% advlock vp U U U
4163 struct vnop_advlock_args
{
4164 struct vnodeop_desc
*a_desc
;
4170 vfs_context_t a_context
;
4174 VNOP_ADVLOCK(struct vnode
*vp
, caddr_t id
, int op
, struct flock
*fl
, int flags
, vfs_context_t context
)
4177 struct vnop_advlock_args a
;
4179 int funnel_state
= 0;
4180 struct uthread
* uth
;
4182 a
.a_desc
= &vnop_advlock_desc
;
4188 a
.a_context
= context
;
4189 thread_safe
= THREAD_SAFE_FS(vp
);
4191 uth
= get_bsdthread_info(current_thread());
4193 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4195 /* Disallow advisory locking on non-seekable vnodes */
4196 if (vnode_isfifo(vp
)) {
4197 _err
= err_advlock(&a
);
4199 if ((vp
->v_flag
& VLOCKLOCAL
)) {
4200 /* Advisory locking done at this layer */
4201 _err
= lf_advlock(&a
);
4203 /* Advisory locking done by underlying filesystem */
4204 _err
= (*vp
->v_op
[vnop_advlock_desc
.vdesc_offset
])(&a
);
4208 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4218 *#% allocate vp L L L
4221 struct vnop_allocate_args
{
4222 struct vnodeop_desc
*a_desc
;
4226 off_t
*a_bytesallocated
;
4228 vfs_context_t a_context
;
4233 VNOP_ALLOCATE(struct vnode
*vp
, off_t length
, u_int32_t flags
, off_t
*bytesallocated
, off_t offset
, vfs_context_t context
)
4236 struct vnop_allocate_args a
;
4238 int funnel_state
= 0;
4240 a
.a_desc
= &vnop_allocate_desc
;
4242 a
.a_length
= length
;
4244 a
.a_bytesallocated
= bytesallocated
;
4245 a
.a_offset
= offset
;
4246 a
.a_context
= context
;
4247 thread_safe
= THREAD_SAFE_FS(vp
);
4250 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4254 _err
= (*vp
->v_op
[vnop_allocate_desc
.vdesc_offset
])(&a
);
4256 unlock_fsnode(vp
, &funnel_state
);
4267 struct vnop_pagein_args
{
4268 struct vnodeop_desc
*a_desc
;
4271 vm_offset_t a_pl_offset
;
4275 vfs_context_t a_context
;
4279 VNOP_PAGEIN(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t context
)
4282 struct vnop_pagein_args a
;
4284 int funnel_state
= 0;
4286 a
.a_desc
= &vnop_pagein_desc
;
4289 a
.a_pl_offset
= pl_offset
;
4290 a
.a_f_offset
= f_offset
;
4293 a
.a_context
= context
;
4294 thread_safe
= THREAD_SAFE_FS(vp
);
4297 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4299 _err
= (*vp
->v_op
[vnop_pagein_desc
.vdesc_offset
])(&a
);
4301 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4309 *#% pageout vp = = =
4312 struct vnop_pageout_args
{
4313 struct vnodeop_desc
*a_desc
;
4316 vm_offset_t a_pl_offset
;
4320 vfs_context_t a_context
;
4325 VNOP_PAGEOUT(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t context
)
4328 struct vnop_pageout_args a
;
4330 int funnel_state
= 0;
4332 a
.a_desc
= &vnop_pageout_desc
;
4335 a
.a_pl_offset
= pl_offset
;
4336 a
.a_f_offset
= f_offset
;
4339 a
.a_context
= context
;
4340 thread_safe
= THREAD_SAFE_FS(vp
);
4343 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4345 _err
= (*vp
->v_op
[vnop_pageout_desc
.vdesc_offset
])(&a
);
4347 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4356 *#% searchfs vp L L L
4359 struct vnop_searchfs_args
{
4360 struct vnodeop_desc
*a_desc
;
4362 void *a_searchparams1
;
4363 void *a_searchparams2
;
4364 struct attrlist
*a_searchattrs
;
4365 u_long a_maxmatches
;
4366 struct timeval
*a_timelimit
;
4367 struct attrlist
*a_returnattrs
;
4368 u_long
*a_nummatches
;
4369 u_long a_scriptcode
;
4372 struct searchstate
*a_searchstate
;
4373 vfs_context_t a_context
;
4378 VNOP_SEARCHFS(struct vnode
*vp
, void *searchparams1
, void *searchparams2
, struct attrlist
*searchattrs
, u_long maxmatches
, struct timeval
*timelimit
, struct attrlist
*returnattrs
, u_long
*nummatches
, u_long scriptcode
, u_long options
, struct uio
*uio
, struct searchstate
*searchstate
, vfs_context_t context
)
4381 struct vnop_searchfs_args a
;
4383 int funnel_state
= 0;
4385 a
.a_desc
= &vnop_searchfs_desc
;
4387 a
.a_searchparams1
= searchparams1
;
4388 a
.a_searchparams2
= searchparams2
;
4389 a
.a_searchattrs
= searchattrs
;
4390 a
.a_maxmatches
= maxmatches
;
4391 a
.a_timelimit
= timelimit
;
4392 a
.a_returnattrs
= returnattrs
;
4393 a
.a_nummatches
= nummatches
;
4394 a
.a_scriptcode
= scriptcode
;
4395 a
.a_options
= options
;
4397 a
.a_searchstate
= searchstate
;
4398 a
.a_context
= context
;
4399 thread_safe
= THREAD_SAFE_FS(vp
);
4402 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4406 _err
= (*vp
->v_op
[vnop_searchfs_desc
.vdesc_offset
])(&a
);
4408 unlock_fsnode(vp
, &funnel_state
);
4416 *#% copyfile fvp U U U
4417 *#% copyfile tdvp L U U
4418 *#% copyfile tvp X U U
4421 struct vnop_copyfile_args
{
4422 struct vnodeop_desc
*a_desc
;
4426 struct componentname
*a_tcnp
;
4429 vfs_context_t a_context
;
4433 VNOP_COPYFILE(struct vnode
*fvp
, struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
4434 int mode
, int flags
, vfs_context_t context
)
4437 struct vnop_copyfile_args a
;
4438 a
.a_desc
= &vnop_copyfile_desc
;
4445 a
.a_context
= context
;
4446 _err
= (*fvp
->v_op
[vnop_copyfile_desc
.vdesc_offset
])(&a
);
4452 VNOP_GETXATTR(vnode_t vp
, const char *name
, uio_t uio
, size_t *size
, int options
, vfs_context_t context
)
4454 struct vnop_getxattr_args a
;
4457 int funnel_state
= 0;
4459 a
.a_desc
= &vnop_getxattr_desc
;
4464 a
.a_options
= options
;
4465 a
.a_context
= context
;
4467 thread_safe
= THREAD_SAFE_FS(vp
);
4469 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4473 error
= (*vp
->v_op
[vnop_getxattr_desc
.vdesc_offset
])(&a
);
4475 unlock_fsnode(vp
, &funnel_state
);
4481 VNOP_SETXATTR(vnode_t vp
, const char *name
, uio_t uio
, int options
, vfs_context_t context
)
4483 struct vnop_setxattr_args a
;
4486 int funnel_state
= 0;
4488 a
.a_desc
= &vnop_setxattr_desc
;
4492 a
.a_options
= options
;
4493 a
.a_context
= context
;
4495 thread_safe
= THREAD_SAFE_FS(vp
);
4497 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4501 error
= (*vp
->v_op
[vnop_setxattr_desc
.vdesc_offset
])(&a
);
4503 unlock_fsnode(vp
, &funnel_state
);
4509 VNOP_REMOVEXATTR(vnode_t vp
, const char *name
, int options
, vfs_context_t context
)
4511 struct vnop_removexattr_args a
;
4514 int funnel_state
= 0;
4516 a
.a_desc
= &vnop_removexattr_desc
;
4519 a
.a_options
= options
;
4520 a
.a_context
= context
;
4522 thread_safe
= THREAD_SAFE_FS(vp
);
4524 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4528 error
= (*vp
->v_op
[vnop_removexattr_desc
.vdesc_offset
])(&a
);
4530 unlock_fsnode(vp
, &funnel_state
);
4536 VNOP_LISTXATTR(vnode_t vp
, uio_t uio
, size_t *size
, int options
, vfs_context_t context
)
4538 struct vnop_listxattr_args a
;
4541 int funnel_state
= 0;
4543 a
.a_desc
= &vnop_listxattr_desc
;
4547 a
.a_options
= options
;
4548 a
.a_context
= context
;
4550 thread_safe
= THREAD_SAFE_FS(vp
);
4552 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4556 error
= (*vp
->v_op
[vnop_listxattr_desc
.vdesc_offset
])(&a
);
4558 unlock_fsnode(vp
, &funnel_state
);
4567 *#% blktooff vp = = =
4570 struct vnop_blktooff_args
{
4571 struct vnodeop_desc
*a_desc
;
4578 VNOP_BLKTOOFF(struct vnode
*vp
, daddr64_t lblkno
, off_t
*offset
)
4581 struct vnop_blktooff_args a
;
4583 int funnel_state
= 0;
4585 a
.a_desc
= &vnop_blktooff_desc
;
4587 a
.a_lblkno
= lblkno
;
4588 a
.a_offset
= offset
;
4589 thread_safe
= THREAD_SAFE_FS(vp
);
4592 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4594 _err
= (*vp
->v_op
[vnop_blktooff_desc
.vdesc_offset
])(&a
);
4596 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4604 *#% offtoblk vp = = =
4607 struct vnop_offtoblk_args
{
4608 struct vnodeop_desc
*a_desc
;
4611 daddr64_t
*a_lblkno
;
4615 VNOP_OFFTOBLK(struct vnode
*vp
, off_t offset
, daddr64_t
*lblkno
)
4618 struct vnop_offtoblk_args a
;
4620 int funnel_state
= 0;
4622 a
.a_desc
= &vnop_offtoblk_desc
;
4624 a
.a_offset
= offset
;
4625 a
.a_lblkno
= lblkno
;
4626 thread_safe
= THREAD_SAFE_FS(vp
);
4629 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4631 _err
= (*vp
->v_op
[vnop_offtoblk_desc
.vdesc_offset
])(&a
);
4633 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4641 *#% blockmap vp L L L
4644 struct vnop_blockmap_args
{
4645 struct vnodeop_desc
*a_desc
;
4653 vfs_context_t a_context
;
4657 VNOP_BLOCKMAP(struct vnode
*vp
, off_t foffset
, size_t size
, daddr64_t
*bpn
, size_t *run
, void *poff
, int flags
, vfs_context_t context
)
4660 struct vnop_blockmap_args a
;
4662 int funnel_state
= 0;
4663 struct vfs_context acontext
;
4665 if (context
== NULL
) {
4666 acontext
.vc_proc
= current_proc();
4667 acontext
.vc_ucred
= kauth_cred_get();
4668 context
= &acontext
;
4670 a
.a_desc
= &vnop_blockmap_desc
;
4672 a
.a_foffset
= foffset
;
4678 a
.a_context
= context
;
4679 thread_safe
= THREAD_SAFE_FS(vp
);
4682 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4684 _err
= (*vp
->v_op
[vnop_blockmap_desc
.vdesc_offset
])(&a
);
4686 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4692 struct vnop_strategy_args
{
4693 struct vnodeop_desc
*a_desc
;
4699 VNOP_STRATEGY(struct buf
*bp
)
4702 struct vnop_strategy_args a
;
4703 a
.a_desc
= &vnop_strategy_desc
;
4705 _err
= (*buf_vnode(bp
)->v_op
[vnop_strategy_desc
.vdesc_offset
])(&a
);
4710 struct vnop_bwrite_args
{
4711 struct vnodeop_desc
*a_desc
;
4716 VNOP_BWRITE(struct buf
*bp
)
4719 struct vnop_bwrite_args a
;
4720 a
.a_desc
= &vnop_bwrite_desc
;
4722 _err
= (*buf_vnode(bp
)->v_op
[vnop_bwrite_desc
.vdesc_offset
])(&a
);
4727 struct vnop_kqfilt_add_args
{
4728 struct vnodeop_desc
*a_desc
;
4731 vfs_context_t a_context
;
4735 VNOP_KQFILT_ADD(struct vnode
*vp
, struct knote
*kn
, vfs_context_t context
)
4738 struct vnop_kqfilt_add_args a
;
4740 int funnel_state
= 0;
4742 a
.a_desc
= VDESC(vnop_kqfilt_add
);
4745 a
.a_context
= context
;
4746 thread_safe
= THREAD_SAFE_FS(vp
);
4749 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4753 _err
= (*vp
->v_op
[vnop_kqfilt_add_desc
.vdesc_offset
])(&a
);
4755 unlock_fsnode(vp
, &funnel_state
);
4761 struct vnop_kqfilt_remove_args
{
4762 struct vnodeop_desc
*a_desc
;
4765 vfs_context_t a_context
;
4769 VNOP_KQFILT_REMOVE(struct vnode
*vp
, uintptr_t ident
, vfs_context_t context
)
4772 struct vnop_kqfilt_remove_args a
;
4774 int funnel_state
= 0;
4776 a
.a_desc
= VDESC(vnop_kqfilt_remove
);
4779 a
.a_context
= context
;
4780 thread_safe
= THREAD_SAFE_FS(vp
);
4783 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4787 _err
= (*vp
->v_op
[vnop_kqfilt_remove_desc
.vdesc_offset
])(&a
);
4789 unlock_fsnode(vp
, &funnel_state
);