/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/fsevents.h>
#include <sys/user.h>
#include <sys/lockf.h>
#include <sys/xattr.h>

#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/task.h>

#include <libkern/OSByteOrder.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/task.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif
#ifndef __LP64__
#define THREAD_SAFE_FS(VP)	\
	((VP)->v_unsafefs ? 0 : 1)
#endif /* __LP64__ */

#define NATIVE_XATTR(VP)	\
	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
static void xattrfile_remove(vnode_t dvp, const char *basename,
		vfs_context_t ctx, int force);
static void xattrfile_setattr(vnode_t dvp, const char * basename,
		struct vnode_attr * vap, vfs_context_t ctx);
/*
 * vnode_setneedinactive
 *
 * Description:	Indicate that when the last iocount on this vnode goes away,
 *		and the usecount is also zero, we should inform the filesystem
 *		via VNOP_INACTIVE.
 *
 * Parameters:	vnode_t		vnode to mark
 *
 * Returns:	Nothing
 *
 * Notes:	Notably used when we're deleting a file--we need not have a
 *		usecount, so VNOP_INACTIVE may not get called by anyone.  We
 *		want it called when we drop our iocount.
 */
void
vnode_setneedinactive(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;
	vnode_unlock(vp);
}
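
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a hypothetical filesystem remove path, "myfs_remove_file", showing why a
 * caller would mark the vnode here.  No usecount may exist during a delete,
 * so VNOP_INACTIVE would otherwise never fire; this asks for it when the
 * last iocount is dropped.
 */
#if 0	/* example only -- not compiled */
static int
myfs_remove_file(vnode_t vp, __unused vfs_context_t ctx)
{
	/* ... filesystem-specific unlink work would go here ... */

	/* have VNOP_INACTIVE called when our iocount goes away */
	vnode_setneedinactive(vp);
	return (0);
}
#endif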
#ifndef __LP64__
int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}

void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
#endif /* __LP64__ */
/* ====================================================================== */
/* ************  EXTERNAL KERNEL APIS  ********************************** */
/* ====================================================================== */

/*
 * implementations of exported VFS operations
 */
int
VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
		return(ENOTSUP);

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	if (vfs_context_is64bit(ctx)) {
		if (vfs_64bitready(mp)) {
			error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
		} else {
			error = ENOTSUP;
		}
	} else {
		error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
	}

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_START(mount_t mp, int flags, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
		return(ENOTSUP);

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
		return(ENOTSUP);

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
/*
 * Returns:	0			Success
 *		ENOTSUP			Not supported
 *
 * Note:	The return codes from the underlying VFS's root routine can't
 *		be fully enumerated here, since third party VFS authors may not
 *		limit their error returns to the ones documented here, even
 *		though this may result in some programs functioning incorrectly.
 *
 *		The return codes documented above are those which may currently
 *		be returned by HFS from hfs_vfs_root, which is a simple wrapper
 *		for a call to hfs_vget on the volume mount point, not including
 *		additional error codes which may be propagated from underlying
 *		routines called by hfs_vget.
 */
int
VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
		return(ENOTSUP);

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#ifndef __LP64__
	thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
int
VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
{
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
		return(ENOTSUP);

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
/* returns the cached throttle mask for the mount_t */
uint64_t
vfs_throttle_mask(mount_t mp)
{
	return(mp->mnt_throttle_mask);
}

/* returns a copy of vfs type name for the mount_t */
void
vfs_name(mount_t mp, char * buffer)
{
	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
}

/* returns vfs type number for the mount_t */
int
vfs_typenum(mount_t mp)
{
	return(mp->mnt_vtable->vfc_typenum);
}
/* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
void*
vfs_mntlabel(mount_t mp)
{
	return (void*)mp->mnt_mntlabel;
}

/* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
uint64_t
vfs_flags(mount_t mp)
{
	return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)));
}
/* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
void
vfs_setflags(mount_t mp, uint64_t flags)
{
	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));

	mount_lock(mp);
	mp->mnt_flag |= lflags;
	mount_unlock(mp);
}

/* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
void
vfs_clearflags(mount_t mp, uint64_t flags)
{
	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));

	mount_lock(mp);
	mp->mnt_flag &= ~lflags;
	mount_unlock(mp);
}
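
/*
 * Illustrative sketch (editor's addition): toggling a command modifier or
 * visible flag on a mount.  Only bits within MNT_CMDFLAGS | MNT_VISFLAGMASK
 * take effect; anything else in the 64-bit argument is masked off by the
 * setters above, and updates are serialized under the mount lock.
 */
#if 0	/* example only -- not compiled */
static void
example_make_mount_nosuid(mount_t mp)
{
	if ((vfs_flags(mp) & MNT_NOSUID) == 0)
		vfs_setflags(mp, MNT_NOSUID);
}
#endif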
/* Is the mount_t ronly and upgrade read/write requested? */
int
vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
{
	return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR));
}

/* Is the mount_t mounted ronly */
int
vfs_isrdonly(mount_t mp)
{
	return (mp->mnt_flag & MNT_RDONLY);
}

/* Is the mount_t mounted for filesystem synchronous writes? */
int
vfs_issynchronous(mount_t mp)
{
	return (mp->mnt_flag & MNT_SYNCHRONOUS);
}

/* Is the mount_t mounted read/write? */
int
vfs_isrdwr(mount_t mp)
{
	return ((mp->mnt_flag & MNT_RDONLY) == 0);
}

/* Is mount_t marked for update (ie MNT_UPDATE) */
int
vfs_isupdate(mount_t mp)
{
	return (mp->mnt_flag & MNT_UPDATE);
}

/* Is mount_t marked for reload (ie MNT_RELOAD) */
int
vfs_isreload(mount_t mp)
{
	return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD));
}
/* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
int
vfs_isforce(mount_t mp)
{
	if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
		return(1);
	else
		return(0);
}

int
vfs_isunmount(mount_t mp)
{
	if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
		return 1;
	} else {
		return 0;
	}
}

int
vfs_64bitready(mount_t mp)
{
	if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
		return(1);
	else
		return(0);
}
int
vfs_authcache_ttl(mount_t mp)
{
	if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) )
		return (mp->mnt_authcache_ttl);
	else
		return (CACHED_RIGHT_INFINITE_TTL);
}

void
vfs_setauthcache_ttl(mount_t mp, int ttl)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
	mp->mnt_authcache_ttl = ttl;
	mount_unlock(mp);
}
void
vfs_clearauthcache_ttl(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
	/*
	 * back to the default TTL value in case
	 * MNTK_AUTH_OPAQUE is set on this mount
	 */
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mount_unlock(mp);
}
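
/*
 * Illustrative sketch (editor's addition): a filesystem that cannot cache
 * authorization results indefinitely (e.g. a network filesystem) might bound
 * the kauth rights cache at mount time and restore the default later.  The
 * 30-second TTL is an arbitrary value chosen for illustration.
 */
#if 0	/* example only -- not compiled */
static void
example_bound_auth_cache(mount_t mp, int remounting)
{
	if (remounting)
		vfs_clearauthcache_ttl(mp);	/* back to CACHED_LOOKUP_RIGHT_TTL */
	else
		vfs_setauthcache_ttl(mp, 30);	/* cache rights for at most 30s */
}
#endif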
void
vfs_markdependency(mount_t mp)
{
	proc_t p = current_proc();
	mount_lock(mp);
	mp->mnt_dependent_process = p;
	mp->mnt_dependent_pid = proc_pid(p);
	mount_unlock(mp);
}

int
vfs_authopaque(mount_t mp)
{
	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE))
		return(1);
	else
		return(0);
}

int
vfs_authopaqueaccess(mount_t mp)
{
	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS))
		return(1);
	else
		return(0);
}

void
vfs_setauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}

void
vfs_setauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}

void
vfs_clearauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}

void
vfs_clearauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}

void
vfs_setextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}

void
vfs_clearextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}

int
vfs_extendedsecurity(mount_t mp)
{
	return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY);
}
/* returns the max size of short symlink in this mount_t */
uint32_t
vfs_maxsymlen(mount_t mp)
{
	return(mp->mnt_maxsymlinklen);
}

/* set max size of short symlink on mount_t */
void
vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
{
	mp->mnt_maxsymlinklen = symlen;
}

/* return a pointer to the RO vfs_statfs associated with mount_t */
struct vfsstatfs *
vfs_statfs(mount_t mp)
{
	return(&mp->mnt_vfsstat);
}
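
/*
 * Illustrative sketch (editor's addition): the vfsstatfs returned above
 * points into the mount structure itself (treat it as read-only), so a
 * caller can inspect the mount point and filesystem type without copying.
 */
#if 0	/* example only -- not compiled */
static void
example_log_mount(mount_t mp)
{
	struct vfsstatfs *sp = vfs_statfs(mp);

	printf("fs %s mounted on %s from %s\n",
	    sp->f_fstypename, sp->f_mntonname, sp->f_mntfromname);
}
#endif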
int
vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int error;

	if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0)
		return(error);

	/*
	 * If we have a filesystem create time, use it to default some others.
	 */
	if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
		if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time))
			VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
	}

	return(0);
}

int
vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
	int error;

	if (vfs_isrdonly(mp))
		return EROFS;

	error = VFS_SETATTR(mp, vfa, ctx);

	/*
	 * If we had alternate ways of setting vfs attributes, we'd
	 * fall back here.
	 */
	return error;
}
/* return the private data handle stored in mount_t */
void *
vfs_fsprivate(mount_t mp)
{
	return(mp->mnt_data);
}

/* set the private data handle in mount_t */
void
vfs_setfsprivate(mount_t mp, void *mntdata)
{
	mount_lock(mp);
	mp->mnt_data = mntdata;
	mount_unlock(mp);
}

/*
 * return the block size of the underlying
 * device associated with mount_t
 */
int
vfs_devblocksize(mount_t mp) {
	return(mp->mnt_devblocksize);
}
/*
 * Returns vnode with an iocount that must be released with vnode_put()
 */
vnode_t
vfs_vnodecovered(mount_t mp)
{
	vnode_t vp = mp->mnt_vnodecovered;
	if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
		return NULL;
	} else {
		return vp;
	}
}
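
/*
 * Illustrative sketch (editor's addition): every successful call to
 * vfs_vnodecovered() returns with an iocount held, so it must be balanced
 * by vnode_put().
 */
#if 0	/* example only -- not compiled */
static void
example_use_covered_vnode(mount_t mp)
{
	vnode_t cvp = vfs_vnodecovered(mp);

	if (cvp != NULL) {
		/* ... safe to use cvp here; the iocount pins it ... */
		vnode_put(cvp);		/* release the iocount we were given */
	}
}
#endif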
/*
 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
 * The iocount must be released with vnode_put().  Note that this KPI is subtle
 * with respect to the validity of using this device vnode for anything substantial
 * (which is discouraged).  If commands are sent to the device driver without
 * taking proper steps to ensure that the device is still open, chaos may ensue.
 * Similarly, this routine should only be called if there is some guarantee that
 * the mount itself is still valid.
 */
vnode_t
vfs_devvp(mount_t mp)
{
	vnode_t vp = mp->mnt_devvp;

	if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
		return vp;
	}

	return NULLVP;
}
/*
 * return the io attributes associated with mount_t
 */
void
vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
{
	if (mp == NULL) {
		ioattrp->io_maxreadcnt  = MAXPHYS;
		ioattrp->io_maxwritecnt = MAXPHYS;
		ioattrp->io_segreadcnt  = 32;
		ioattrp->io_segwritecnt = 32;
		ioattrp->io_maxsegreadsize  = MAXPHYS;
		ioattrp->io_maxsegwritesize = MAXPHYS;
		ioattrp->io_devblocksize = DEV_BSIZE;
		ioattrp->io_flags = 0;
	} else {
		ioattrp->io_maxreadcnt  = mp->mnt_maxreadcnt;
		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
		ioattrp->io_segreadcnt  = mp->mnt_segreadcnt;
		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
		ioattrp->io_maxsegreadsize  = mp->mnt_maxsegreadsize;
		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
		ioattrp->io_devblocksize = mp->mnt_devblocksize;
		ioattrp->io_flags = mp->mnt_ioflags;
	}
	ioattrp->io_reserved[0] = NULL;
	ioattrp->io_reserved[1] = NULL;
}
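
/*
 * Illustrative sketch (editor's addition): code sizing its I/O requests
 * should consult the mount's I/O attributes rather than assuming MAXPHYS.
 * Passing a NULL mount yields the conservative defaults filled in above.
 */
#if 0	/* example only -- not compiled */
static uint32_t
example_max_read_size(mount_t mp)
{
	struct vfsioattr ioattr;

	vfs_ioattr(mp, &ioattr);	/* mp may be NULL; defaults are returned */
	return (ioattr.io_maxreadcnt);
}
#endif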
/*
 * set the IO attributes associated with mount_t
 */
void
vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
{
	if (mp == NULL)
		return;
	mp->mnt_maxreadcnt  = ioattrp->io_maxreadcnt;
	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
	mp->mnt_segreadcnt  = ioattrp->io_segreadcnt;
	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
	mp->mnt_maxsegreadsize  = ioattrp->io_maxsegreadsize;
	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
	mp->mnt_devblocksize = ioattrp->io_devblocksize;
	mp->mnt_ioflags = ioattrp->io_flags;
}
/*
 * Add a new filesystem into the kernel, as specified in the passed-in
 * vfstable structure.  It fills in the vnode
 * dispatch vector that is to be used when vnodes are created.
 * It returns a handle which is to be used when the FS is to be removed.
 */
typedef int (*PFI)(void *);
extern int vfs_opv_numops;
errno_t
vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
{
	struct vfstable	*newvfstbl = NULL;
	int	i,j;
	int	(***opv_desc_vector_p)(void *);
	int	(**opv_desc_vector)(void *);
	struct vnodeopv_entry_desc	*opve_descp;
	int	desccount;
	int	descsize;
	PFI	*descptr;

	/*
	 * This routine is responsible for all the initialization that would
	 * ordinarily be done as part of the system startup;
	 */

	if (vfe == (struct vfs_fsentry *)0)
		return(EINVAL);

	desccount = vfe->vfe_vopcnt;
	if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
	    || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
		return(EINVAL);

#ifdef __LP64__
	/* Non-threadsafe filesystems are not supported for K64 */
	if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
		return (EINVAL);
	}
#endif /* __LP64__ */

	MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
	       M_WAITOK);
	bzero(newvfstbl, sizeof(struct vfstable));
	newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
	strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
	if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
		newvfstbl->vfc_typenum = maxvfsconf++;
	else
		newvfstbl->vfc_typenum = vfe->vfe_fstypenum;

	newvfstbl->vfc_refcount = 0;
	newvfstbl->vfc_flags = 0;
	newvfstbl->vfc_mountroot = NULL;
	newvfstbl->vfc_next = NULL;
	newvfstbl->vfc_vfsflags = 0;
	if (vfe->vfe_flags & VFS_TBL64BITREADY)
		newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
	if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2)
		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
	if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
#ifndef __LP64__
	if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
		newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
	if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
		newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
#endif /* __LP64__ */
	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
		newvfstbl->vfc_flags |= MNT_LOCAL;
	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
		newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
	else
		newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;

	if (vfe->vfe_flags & VFS_TBLNATIVEXATTR)
		newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
	if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT)
		newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
	if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED)
		newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
	if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
		newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;

	/*
	 * Allocate and init the vectors.
	 * Also handle backwards compatibility.
	 *
	 * We allocate one large block to hold all <desccount>
	 * vnode operation vectors stored contiguously.
	 */
	/* XXX - shouldn't be M_TEMP */

	descsize = desccount * vfs_opv_numops * sizeof(PFI);
	MALLOC(descptr, PFI *, descsize,
	       M_TEMP, M_WAITOK);
	bzero(descptr, descsize);

	newvfstbl->vfc_descptr = descptr;
	newvfstbl->vfc_descsize = descsize;

	for (i = 0; i < desccount; i++ ) {
		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
		/*
		 * Fill in the caller's pointer to the start of the i'th vector.
		 * They'll need to supply it when calling vnode_create.
		 */
		opv_desc_vector = descptr + i * vfs_opv_numops;
		*opv_desc_vector_p = opv_desc_vector;

		for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
			opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);

			/*
			 * Sanity check:  is this operation listed
			 * in the list of operations?  We check this
			 * by seeing if its offset is zero.  Since
			 * the default routine should always be listed
			 * first, it should be the only one with a zero
			 * offset.  Any other operation with a zero
			 * offset is probably not listed in
			 * vfs_op_descs, and so is probably an error.
			 *
			 * A panic here means the layer programmer
			 * has committed the all-too common bug
			 * of adding a new operation to the layer's
			 * list of vnode operations but
			 * not adding the operation to the system-wide
			 * list of supported operations.
			 */
			if (opve_descp->opve_op->vdesc_offset == 0 &&
			    opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
				printf("vfs_fsadd: operation %s not listed in %s.\n",
				       opve_descp->opve_op->vdesc_name,
				       "vfs_op_descs");
				panic("vfs_fsadd: bad operation");
			}
			/*
			 * Fill in this entry.
			 */
			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
			    opve_descp->opve_impl;
		}

		/*
		 * Finally, go back and replace unfilled routines
		 * with their default.  (Sigh, an O(n^3) algorithm.  I
		 * could make it better, but that'd be work, and n is small.)
		 */
		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;

		/*
		 * Force every operations vector to have a default routine.
		 */
		opv_desc_vector = *opv_desc_vector_p;
		if (opv_desc_vector[VOFFSET(vnop_default)] == NULL)
			panic("vfs_fsadd: operation vector without default routine.");
		for (j = 0; j < vfs_opv_numops; j++)
			if (opv_desc_vector[j] == NULL)
				opv_desc_vector[j] =
				    opv_desc_vector[VOFFSET(vnop_default)];

	} /* end of each vnodeopv_desc parsing */

	*handle = vfstable_add(newvfstbl);

	if (newvfstbl->vfc_typenum <= maxvfsconf)
		maxvfsconf = newvfstbl->vfc_typenum + 1;

	if (newvfstbl->vfc_vfsops->vfs_init) {
		struct vfsconf vfsc;
		bzero(&vfsc, sizeof(struct vfsconf));
		vfsc.vfc_reserved1 = 0;
		bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
		vfsc.vfc_typenum = (*handle)->vfc_typenum;
		vfsc.vfc_refcount = (*handle)->vfc_refcount;
		vfsc.vfc_flags = (*handle)->vfc_flags;
		vfsc.vfc_reserved2 = 0;
		vfsc.vfc_reserved3 = 0;

		(*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
	}

	FREE(newvfstbl, M_TEMP);

	return(0);
}
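
/*
 * Illustrative sketch (editor's addition): minimal registration of a
 * hypothetical filesystem "examplefs".  The operation tables
 * (examplefs_vfsops, examplefs_vnodeop_opv_desc) are assumed to be defined
 * elsewhere; only the vfs_fsentry plumbing for vfs_fsadd() is shown, and
 * the returned handle is what a matching vfs_fsremove() would take.
 */
#if 0	/* example only -- not compiled */
extern struct vfsops examplefs_vfsops;
extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;

static vfstable_t examplefs_handle;

static errno_t
examplefs_register(void)
{
	struct vnodeopv_desc *opv_descs[] = { &examplefs_vnodeop_opv_desc };
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &examplefs_vfsops;
	vfe.vfe_vopcnt = 1;			/* one vnode operation vector */
	vfe.vfe_opvdescs = opv_descs;
	strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

	return vfs_fsadd(&vfe, &examplefs_handle);
}
#endif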
/*
 * Removes the filesystem from the kernel.
 * The argument passed in is the handle that was given when
 * the file system was added.
 */
errno_t
vfs_fsremove(vfstable_t handle)
{
	struct vfstable * vfstbl = (struct vfstable *)handle;
	void *old_desc = NULL;
	errno_t err;

	/* Preflight check for any mounts */
	mount_list_lock();
	if ( vfstbl->vfc_refcount != 0 ) {
		mount_list_unlock();
		return EBUSY;
	}

	/*
	 * save the old descriptor; the free cannot occur unconditionally,
	 * since vfstable_del() may fail.
	 */
	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
		old_desc = vfstbl->vfc_descptr;
	}
	err = vfstable_del(vfstbl);

	mount_list_unlock();

	/* free the descriptor if the delete was successful */
	if (err == 0 && old_desc) {
		FREE(old_desc, M_TEMP);
	}

	return(err);
}
int
vfs_context_pid(vfs_context_t ctx)
{
	return (proc_pid(vfs_context_proc(ctx)));
}

int
vfs_context_suser(vfs_context_t ctx)
{
	return (suser(ctx->vc_ucred, NULL));
}
/*
 * Return bit field of signals posted to all threads in the context's process.
 *
 * XXX Signals should be tied to threads, not processes, for most uses of this
 * XXX call.
 */
int
vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
{
	proc_t p = vfs_context_proc(ctx);
	if (p)
		return(proc_pendingsignals(p, mask));
	return(0);
}

int
vfs_context_is64bit(vfs_context_t ctx)
{
	proc_t proc = vfs_context_proc(ctx);

	if (proc)
		return(proc_is64bit(proc));
	return(0);
}
/*
 * vfs_context_proc
 *
 * Description:	Given a vfs_context_t, return the proc_t associated with it.
 *
 * Parameters:	vfs_context_t			The context to use
 *
 * Returns:	proc_t				The process for this context
 *
 * Notes:	This function will return the current_proc() if any of the
 *		following conditions are true:
 *
 *		o	The supplied context pointer is NULL
 *		o	There is no Mach thread associated with the context
 *		o	There is no Mach task associated with the Mach thread
 *		o	There is no proc_t associated with the Mach task
 *		o	The proc_t has no per process open file table
 *		o	The proc_t is post-vfork()
 *
 *		This causes this function to return a value matching as
 *		closely as possible the previous behaviour, while at the
 *		same time avoiding the task lending that results from vfork()
 */
proc_t
vfs_context_proc(vfs_context_t ctx)
{
	proc_t	proc = NULL;

	if (ctx != NULL && ctx->vc_thread != NULL)
		proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
	if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
		proc = NULL;

	return(proc == NULL ? current_proc() : proc);
}
/*
 * vfs_context_get_special_port
 *
 * Description:	Return the requested special port from the task associated
 *		with the given context.
 *
 * Parameters:	vfs_context_t			The context to use
 *		int				Index of special port
 *		ipc_port_t *			Pointer to returned port
 *
 * Returns:	kern_return_t			see task_get_special_port()
 */
kern_return_t
vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
{
	task_t		task = NULL;

	if (ctx != NULL && ctx->vc_thread != NULL)
		task = get_threadtask(ctx->vc_thread);

	return task_get_special_port(task, which, portp);
}

/*
 * vfs_context_set_special_port
 *
 * Description:	Set the requested special port in the task associated
 *		with the given context.
 *
 * Parameters:	vfs_context_t			The context to use
 *		int				Index of special port
 *		ipc_port_t			New special port
 *
 * Returns:	kern_return_t			see task_set_special_port()
 */
kern_return_t
vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
{
	task_t		task = NULL;

	if (ctx != NULL && ctx->vc_thread != NULL)
		task = get_threadtask(ctx->vc_thread);

	return task_set_special_port(task, which, port);
}
/*
 * vfs_context_thread
 *
 * Description:	Return the Mach thread associated with a vfs_context_t
 *
 * Parameters:	vfs_context_t			The context to use
 *
 * Returns:	thread_t			The thread for this context, or
 *						NULL, if there is not one.
 *
 * Notes:	NULL thread_t's are legal, but discouraged.  They occur only
 *		as a result of a static vfs_context_t declaration in a function
 *		and will result in this function returning NULL.
 *
 *		This is intentional; this function should NOT return the
 *		current_thread() in this case.
 */
thread_t
vfs_context_thread(vfs_context_t ctx)
{
	return(ctx->vc_thread);
}
/*
 * vfs_context_cwd
 *
 * Description:	Returns a reference on the vnode for the current working
 *		directory for the supplied context
 *
 * Parameters:	vfs_context_t			The context to use
 *
 * Returns:	vnode_t				The current working directory
 *						for this context
 *
 * Notes:	The function first attempts to obtain the current directory
 *		from the thread, and if it is not present there, falls back
 *		to obtaining it from the process instead.  If it can't be
 *		obtained from either place, we return NULLVP.
 */
vnode_t
vfs_context_cwd(vfs_context_t ctx)
{
	vnode_t cwd = NULLVP;

	if(ctx != NULL && ctx->vc_thread != NULL) {
		uthread_t uth = get_bsdthread_info(ctx->vc_thread);
		proc_t proc;

		/*
		 * Get the cwd from the thread; if there isn't one, get it
		 * from the process, instead.
		 */
		if ((cwd = uth->uu_cdir) == NULLVP &&
		    (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL &&
		    proc->p_fd != NULL)
			cwd = proc->p_fd->fd_cdir;
	}

	return(cwd);
}
/*
 * vfs_context_create
 *
 * Description:	Allocate and initialize a new context.
 *
 * Parameters:	vfs_context_t:			Context to copy, or NULL for new
 *
 * Returns:	Pointer to new context
 *
 * Notes:	Copy cred and thread from argument, if available; else
 *		initialize with current thread and new cred.  Returns
 *		with a reference held on the credential.
 */
vfs_context_t
vfs_context_create(vfs_context_t ctx)
{
	vfs_context_t newcontext;

	newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context));

	if (newcontext) {
		kauth_cred_t safecred;
		if (ctx) {
			newcontext->vc_thread = ctx->vc_thread;
			safecred = ctx->vc_ucred;
		} else {
			newcontext->vc_thread = current_thread();
			safecred = kauth_cred_get();
		}
		if (IS_VALID_CRED(safecred))
			kauth_cred_ref(safecred);
		newcontext->vc_ucred = safecred;
		return(newcontext);
	}
	return(NULL);
}
vfs_context_t
vfs_context_current(void)
{
	vfs_context_t ctx = NULL;
	volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread());

	if (ut != NULL) {
		if (ut->uu_context.vc_ucred != NULL) {
			ctx = &ut->uu_context;
		}
	}

	return(ctx == NULL ? vfs_context_kernel() : ctx);
}
/*
 * Dangerous hack - adopt the first kernel thread as the current thread, to
 * get to the vfs_context_t in the uthread associated with a kernel thread.
 * This is used by UDF to make the call into IOCDMediaBSDClient,
 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
 * ioctl() is being called from kernel or user space (and all this because
 * we do not pass threads into our ioctl()'s, instead of processes).
 *
 * This is also used by imageboot_setup(), called early from bsd_init() after
 * kernproc has been given a credential.
 *
 * Note: The use of proc_thread() here is a convenience to avoid inclusion
 * of many Mach headers to do the reference directly rather than indirectly;
 * we will need to forego this convenience when we retire proc_thread().
 */
static struct vfs_context kerncontext;
vfs_context_t
vfs_context_kernel(void)
{
	if (kerncontext.vc_ucred == NOCRED)
		kerncontext.vc_ucred = kernproc->p_ucred;
	if (kerncontext.vc_thread == NULL)
		kerncontext.vc_thread = proc_thread(kernproc);

	return(&kerncontext);
}
int
vfs_context_rele(vfs_context_t ctx)
{
	if (ctx) {
		if (IS_VALID_CRED(ctx->vc_ucred))
			kauth_cred_unref(&ctx->vc_ucred);
		kfree(ctx, sizeof(struct vfs_context));
	}
	return(0);
}
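
/*
 * Illustrative sketch (editor's addition): the create/rele pairing.  Passing
 * NULL to vfs_context_create() snapshots the current thread and credential;
 * the credential reference taken there is dropped by vfs_context_rele().
 */
#if 0	/* example only -- not compiled */
static int
example_with_context(vnode_t vp)
{
	vfs_context_t ctx = vfs_context_create(NULL);
	off_t size;
	int error = ENOMEM;

	if (ctx != NULL) {
		error = vnode_size(vp, &size, ctx);
		vfs_context_rele(ctx);	/* drops the cred reference */
	}
	return (error);
}
#endif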
kauth_cred_t
vfs_context_ucred(vfs_context_t ctx)
{
	return (ctx->vc_ucred);
}

/*
 * Return true if the context is owned by the superuser.
 */
int
vfs_context_issuser(vfs_context_t ctx)
{
	return(kauth_cred_issuser(vfs_context_ucred(ctx)));
}
/*
 * Given a context, for all fields of vfs_context_t which
 * are not held with a reference, set those fields to the
 * values for the current execution context.  Currently, this
 * just means the vc_thread.
 *
 * Returns: 0 for success, nonzero for failure
 *
 * The intended use is:
 * 1. vfs_context_create()	gets the caller a context
 * 2. vfs_context_bind()	sets the unrefcounted data
 * 3. vfs_context_rele()	releases the context
 */
int
vfs_context_bind(vfs_context_t ctx)
{
	ctx->vc_thread = current_thread();
	return 0;
}

/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
/*
 * Convert between vnode types and inode formats (since POSIX.1
 * defines mode word of stat structure in terms of inode formats).
 */
enum vtype
vnode_iftovt(int mode)
{
	return(iftovt_tab[((mode) & S_IFMT) >> 12]);
}

int
vnode_vttoif(enum vtype indx)
{
	return(vttoif_tab[(int)(indx)]);
}

int
vnode_makeimode(int indx, int mode)
{
	return (int)(VTTOIF(indx) | (mode));
}
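
/*
 * Illustrative sketch (editor's addition): round-tripping between vnode
 * types and the S_IFMT bits of a stat-style mode word with the helpers
 * above.
 */
#if 0	/* example only -- not compiled */
static int
example_mode_for_new_dir(void)
{
	int mode = vnode_makeimode(VDIR, 0755);	/* S_IFDIR | 0755 */

	if (vnode_iftovt(mode) != VDIR)
		panic("vtype/imode round trip broken");
	return (mode);
}
#endif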
/*
 * vnode manipulation functions.
 */

/* returns system root vnode iocount; It should be released using vnode_put() */
vnode_t
vfs_rootvnode(void)
{
	int error;

	error = vnode_get(rootvnode);
	if (error)
		return ((vnode_t)0);
	else
		return rootvnode;
}
uint32_t
vnode_vid(vnode_t vp)
{
	return ((uint32_t)(vp->v_id));
}

mount_t
vnode_mount(vnode_t vp)
{
	return (vp->v_mount);
}

mount_t
vnode_mountedhere(vnode_t vp)
{
	mount_t mp;

	if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
	    (mp->mnt_vnodecovered == vp))
		return (mp);
	else
		return (mount_t)NULL;
}
/* returns vnode type of vnode_t */
enum vtype
vnode_vtype(vnode_t vp)
{
	return (vp->v_type);
}

/* returns FS specific node saved in vnode */
void *
vnode_fsnode(vnode_t vp)
{
	return (vp->v_data);
}

void
vnode_clearfsnode(vnode_t vp)
{
	vp->v_data = NULL;
}

dev_t
vnode_specrdev(vnode_t vp)
{
	return(vp->v_specrdev);
}
/* Accessor functions */
/* is vnode_t a root vnode */
int
vnode_isvroot(vnode_t vp)
{
	return ((vp->v_flag & VROOT)? 1 : 0);
}

/* is vnode_t a system vnode */
int
vnode_issystem(vnode_t vp)
{
	return ((vp->v_flag & VSYSTEM)? 1 : 0);
}

/* is vnode_t a swap file vnode */
int
vnode_isswap(vnode_t vp)
{
	return ((vp->v_flag & VSWAP)? 1 : 0);
}

/* is vnode_t a tty */
int
vnode_istty(vnode_t vp)
{
	return ((vp->v_flag & VISTTY) ? 1 : 0);
}

/* if vnode_t mount operation in progress */
int
vnode_ismount(vnode_t vp)
{
	return ((vp->v_flag & VMOUNT)? 1 : 0);
}
/* is this vnode under recycle now */
int
vnode_isrecycled(vnode_t vp)
{
	int ret;

	vnode_lock_spin(vp);
	ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0;
	vnode_unlock(vp);
	return(ret);
}
/* vnode was created by background task requesting rapid aging
   and has not since been referenced by a normal task */
int
vnode_israge(vnode_t vp)
{
	return ((vp->v_flag & VRAGE)? 1 : 0);
}

int
vnode_needssnapshots(vnode_t vp)
{
	return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0);
}
/* Check the process/thread to see if we should skip atime updates */
int
vfs_ctx_skipatime (vfs_context_t ctx) {
	struct uthread *ut;
	proc_t proc;
	thread_t thr;

	proc = vfs_context_proc(ctx);
	thr = vfs_context_thread (ctx);

	/* Validate pointers in case we were invoked via a kernel context */
	if (thr && proc) {
		ut = get_bsdthread_info (thr);

		if (proc->p_lflag & P_LRAGE_VNODES) {
			return 1;
		}

		if (ut) {
			if (ut->uu_flag & UT_RAGE_VNODES) {
				return 1;
			}
		}
	}
	return 0;
}
/* is vnode_t marked to not keep data cached once it's been consumed */
int
vnode_isnocache(vnode_t vp)
{
	return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0);
}

/*
 * has sequential readahead been disabled on this vnode
 */
int
vnode_isnoreadahead(vnode_t vp)
{
	return ((vp->v_flag & VRAOFF)? 1 : 0);
}

int
vnode_is_openevt(vnode_t vp)
{
	return ((vp->v_flag & VOPENEVT)? 1 : 0);
}

/* is vnode_t a standard one? */
int
vnode_isstandard(vnode_t vp)
{
	return ((vp->v_flag & VSTANDARD)? 1 : 0);
}

/* don't vflush() if SKIPSYSTEM */
int
vnode_isnoflush(vnode_t vp)
{
	return ((vp->v_flag & VNOFLUSH)? 1 : 0);
}

/* is vnode_t a regular file */
int
vnode_isreg(vnode_t vp)
{
	return ((vp->v_type == VREG)? 1 : 0);
}

/* is vnode_t a directory? */
int
vnode_isdir(vnode_t vp)
{
	return ((vp->v_type == VDIR)? 1 : 0);
}

/* is vnode_t a symbolic link ? */
int
vnode_islnk(vnode_t vp)
{
	return ((vp->v_type == VLNK)? 1 : 0);
}
int
vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
{
	struct nameidata *ndp = cnp->cn_ndp;

	if (ndp == NULL) {
		panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
	}

	if (vnode_isdir(vp)) {
		if (vp->v_mountedhere != NULL) {
			goto yes;
		}

#if CONFIG_TRIGGERS
		if (vp->v_resolve) {
			goto yes;
		}
#endif /* CONFIG_TRIGGERS */
	}

	if (vnode_islnk(vp)) {
		/* From lookup():  || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
		if (cnp->cn_flags & FOLLOW) {
			goto yes;
		}
		if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
			goto yes;
		}
	}

	return 0;

yes:
	ndp->ni_flag |= NAMEI_CONTLOOKUP;
	return EKEEPLOOKING;
}
/* is vnode_t a fifo ? */
int
vnode_isfifo(vnode_t vp)
{
	return ((vp->v_type == VFIFO)? 1 : 0);
}

/* is vnode_t a block device? */
int
vnode_isblk(vnode_t vp)
{
	return ((vp->v_type == VBLK)? 1 : 0);
}

int
vnode_isspec(vnode_t vp)
{
	return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0);
}

/* is vnode_t a char device? */
int
vnode_ischr(vnode_t vp)
{
	return ((vp->v_type == VCHR)? 1 : 0);
}

/* is vnode_t a socket? */
int
vnode_issock(vnode_t vp)
{
	return ((vp->v_type == VSOCK)? 1 : 0);
}

/* is vnode_t a device with multiple active vnodes referring to it? */
int
vnode_isaliased(vnode_t vp)
{
	enum vtype vt = vp->v_type;
	if (!((vt == VCHR) || (vt == VBLK))) {
		return 0;
	} else {
		return (vp->v_specflags & SI_ALIASED);
	}
}
/* is vnode_t a named stream? */
int
vnode_isnamedstream(
#if NAMEDSTREAMS
		vnode_t vp)
{
	return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0);
#else
		__unused vnode_t vp)
{
	return (0);
#endif
}

int
vnode_isshadow(
#if NAMEDSTREAMS
		vnode_t vp)
{
	return ((vp->v_flag & VISSHADOW) ? 1 : 0);
#else
		__unused vnode_t vp)
{
	return (0);
#endif
}

/* does vnode have associated named stream vnodes ? */
int
vnode_hasnamedstreams(
#if NAMEDSTREAMS
		vnode_t vp)
{
	return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0);
#else
		__unused vnode_t vp)
{
	return (0);
#endif
}
/* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
void
vnode_setnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}

void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}

void
vnode_set_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VOPENEVT;
	vnode_unlock(vp);
}

void
vnode_clear_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VOPENEVT;
	vnode_unlock(vp);
}

void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}

void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}

/* mark vnode_t to skip vflush() if SKIPSYSTEM */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}

void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}

/* is vnode_t a blkdevice and has a FS mounted on it */
int
vnode_ismountedon(vnode_t vp)
{
	return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0);
}

void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}

void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
void
vnode_settag(vnode_t vp, int tag)
{
	vp->v_tag = tag;
}

int
vnode_tag(vnode_t vp)
{
	return(vp->v_tag);
}

vnode_t
vnode_parent(vnode_t vp)
{
	return(vp->v_parent);
}

void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}

const char *
vnode_name(vnode_t vp)
{
	/* we try to keep v_name a reasonable name for the node */
	return(vp->v_name);
}

void
vnode_setname(vnode_t vp, char * name)
{
	vp->v_name = name;
}
/* return the registered FS name when adding the FS to kernel */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}

/* return the FS type number */
int
vnode_vfstypenum(vnode_t vp)
{
	return(vp->v_mount->mnt_vtable->vfc_typenum);
}

int
vnode_vfs64bitready(vnode_t vp)
{
	/*
	 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
	 */
	if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY))
		return(1);
	else
		return(0);
}

/* return the visible flags on associated mount point of vnode_t */
uint32_t
vnode_vfsvisflags(vnode_t vp)
{
	return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK);
}

/* return the command modifier flags on associated mount point of vnode_t */
uint32_t
vnode_vfscmdflags(vnode_t vp)
{
	return(vp->v_mount->mnt_flag & MNT_CMDFLAGS);
}

/* return the max symlink of short links of vnode_t */
uint32_t
vnode_vfsmaxsymlen(vnode_t vp)
{
	return(vp->v_mount->mnt_maxsymlinklen);
}

/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
struct vfsstatfs *
vnode_vfsstatfs(vnode_t vp)
{
	return(&vp->v_mount->mnt_vfsstat);
}

/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
void *
vnode_vfsfsprivate(vnode_t vp)
{
	return(vp->v_mount->mnt_data);
}

/* is vnode_t in a rdonly mounted FS */
int
vnode_vfsisrdonly(vnode_t vp)
{
	return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0);
}
int
vnode_compound_rename_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
}

int
vnode_compound_rmdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
}

int
vnode_compound_mkdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
}

int
vnode_compound_remove_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
}

int
vnode_compound_open_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
}

int
vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
{
	return ((vp->v_mount->mnt_compound_ops & opid) != 0);
}
/*
 * Returns vnode ref to current working directory; if a per-thread current
 * working directory is in effect, return that instead of the per process one.
 *
 * XXX Published, but not used.
 */
vnode_t
current_workingdir(void)
{
	return vfs_context_cwd(vfs_context_current());
}
/* returns vnode ref to current root(chroot) directory */
vnode_t
current_rootdir(void)
{
	proc_t proc = current_proc();
	vnode_t vp;

	if ( (vp = proc->p_fd->fd_rdir) ) {
		if ( (vnode_getwithref(vp)) )
			return (NULL);
	}
	return vp;
}
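
/*
 * Illustrative sketch (editor's addition): current_rootdir() returns NULL
 * both when no chroot is in effect and when the root vnode could not be
 * referenced; a non-NULL result carries an iocount the caller must drop.
 */
#if 0	/* example only -- not compiled */
static void
example_check_chroot(void)
{
	vnode_t rdir = current_rootdir();

	if (rdir != NULL) {
		/* ... the process is chrooted; rdir is safely referenced ... */
		vnode_put(rdir);	/* balance vnode_getwithref() */
	}
}
#endif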
/*
 * Get a filesec and optional acl contents from an extended attribute.
 * Function will attempt to retrieve ACL, UUID, and GUID information using a
 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
 *
 * Parameters:	vp			The vnode on which to operate.
 *		fsecp			The filesec (and ACL, if any) being
 *					retrieved.
 *		ctx			The vnode context in which the
 *					operation is to be attempted.
 *
 * Returns:	0			Success
 *
 * Notes:	The kauth_filesec_t in '*fsecp', if retrieved, will be in
 *		host byte order, as will be the ACL contents, if any.
 *		Internally, we will canonicalize these values from network (PPC)
 *		byte order after we retrieve them so that the on-disk contents
 *		of the extended attribute are identical for both PPC and Intel
 *		(if we were not being required to provide this service via
 *		fallback, this would be the job of the filesystem
 *		'VNOP_GETATTR' call).
 *
 *		We use ntohl() because it has a transitive property on Intel
 *		machines and no effect on PPC machines.  This guarantees us
 *
 * XXX:		Deleting rather than ignoring a corrupt security structure is
 *		probably the only way to reset it without assistance from a
 *		file system integrity checking tool.  Right now we ignore it.
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
static int
vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	uio_t	fsec_uio;
	size_t	fsec_size;
	size_t	xsize, rsize;
	int	error;
	uint32_t	host_fsec_magic;
	uint32_t	host_acl_entrycount;

	fsec = NULL;
	fsec_uio = NULL;
	error = 0;

	/* find out how big the EA is */
	if ((error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx)) != 0) {
		/* no EA, no filesec */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
			error = 0;
		/* either way, we are done */
		goto out;
	}

	/*
	 * To be valid, a kauth_filesec_t must be large enough to hold a zero
	 * ACE entry ACL, and if it's larger than that, it must have the right
	 * number of bytes such that it contains an atomic number of ACEs,
	 * rather than partial entries.  Otherwise, we ignore it.
	 */
	if (!KAUTH_FILESEC_VALID(xsize)) {
		KAUTH_DEBUG("    ERROR - Bogus kauth_filesec_t: %ld bytes", xsize);
		error = 0;
		goto out;
	}

	/* how many entries would fit? */
	fsec_size = KAUTH_FILESEC_COUNT(xsize);

	/* get buffer and uio */
	if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) ||
	    ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
	    uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
		KAUTH_DEBUG("    ERROR - could not allocate iov to read ACL");
		error = ENOMEM;
		goto out;
	}

	/* read security attribute */
	rsize = xsize;
	if ((error = vn_getxattr(vp,
		 KAUTH_FILESEC_XATTR,
		 fsec_uio,
		 &rsize,
		 XATTR_NOSECURITY,
		 ctx)) != 0) {

		/* no attribute - no security data */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
			error = 0;
		/* either way, we are done */
		goto out;
	}

	/*
	 * Validate security structure; the validation must take place in host
	 * byte order.  If it's corrupt, we will just ignore it.
	 */

	/* Validate the size before trying to convert it */
	if (rsize < KAUTH_FILESEC_SIZE(0)) {
		KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
		goto out;
	}

	/* Validate the magic number before trying to convert it */
	host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
	if (fsec->fsec_magic != host_fsec_magic) {
		KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
		goto out;
	}

	/* Validate the entry count before trying to convert it. */
	host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
	if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
		if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
			KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
			goto out;
		}
		if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
			KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
			goto out;
		}
	}

	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);

	*fsecp = fsec;
	fsec = NULL;
	error = 0;
out:
	if (fsec != NULL)
		kauth_filesec_free(fsec);
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	if (error)
		*fsecp = NULL;
	return(error);
}
/*
 * Set a filesec and optional acl contents into an extended attribute.
 * function will attempt to store ACL, UUID, and GUID information using a
 * write to a named extended attribute (KAUTH_FILESEC_XATTR).  The 'acl'
 * may or may not point to the `fsec->fsec_acl`, depending on whether the
 * original caller supplied an acl.
 *
 * Parameters:	vp			The vnode on which to operate.
 *		fsec			The filesec being set.
 *		acl			The acl to be associated with 'fsec'.
 *		ctx			The vnode context in which the
 *					operation is to be attempted.
 *
 * Returns:	0			Success
 *
 * Notes:	Both the fsec and the acl are always valid.
 *
 *		The kauth_filesec_t in 'fsec', if any, is in host byte order,
 *		as are the acl contents, if they are used.  Internally, we will
 *		canonicalize these values into network (PPC) byte order before we
 *		attempt to write them so that the on-disk contents of the
 *		extended attribute are identical for both PPC and Intel (if we
 *		were not being required to provide this service via fallback,
 *		this would be the job of the filesystem 'VNOP_SETATTR' call).
 *		We reverse this process on the way out, so we leave with the
 *		same byte order we started with.
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t		fsec_uio;
	int		error;
	uint32_t	saved_acl_copysize;

	fsec_uio = NULL;

	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG("    ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/*
	 * Save the pre-converted ACL copysize, because it gets swapped too
	 * if we are running with the wrong endianness.
	 */
	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
	error = vn_setxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    XATTR_NOSECURITY,		/* we have auth'ed already */
	    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
	if (fsec_uio != NULL)
		uio_free(fsec_uio);
	return(error);
}
/*
 * Returns:	0			Success
 *		ENOMEM			Not enough space [only if has filesec]
 *		VNOP_GETATTR:		???
 *		vnode_get_filesec:	???
 *		kauth_cred_guid2uid:	???
 *		kauth_cred_guid2gid:	???
 *		vfs_update_vfsstat:	???
 */
2427 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2429 kauth_filesec_t fsec
;
2435 /* don't ask for extended security data if the filesystem doesn't support it */
2436 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
2437 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
2438 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
2439 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
2443 * If the caller wants size values we might have to synthesise, give the
2444 * filesystem the opportunity to supply better intermediate results.
2446 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2447 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2448 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2449 VATTR_SET_ACTIVE(vap
, va_data_size
);
2450 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
2451 VATTR_SET_ACTIVE(vap
, va_total_size
);
2452 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
2455 error
= VNOP_GETATTR(vp
, vap
, ctx
);
2457 KAUTH_DEBUG("ERROR - returning %d", error
);
2462 * If extended security data was requested but not returned, try the fallback
2465 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2468 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
2469 /* try to get the filesec */
2470 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
2473 /* if no filesec, no attributes */
2475 VATTR_RETURN(vap
, va_acl
, NULL
);
2476 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
2477 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
2480 /* looks good, try to return what we were asked for */
        VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
        VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

        /* only return the ACL if we were actually asked for it */
        if (VATTR_IS_ACTIVE(vap, va_acl)) {
            if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
                VATTR_RETURN(vap, va_acl, NULL);
            } else {
                facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
                if (facl == NULL) {
                    kauth_filesec_free(fsec);
                    error = ENOMEM;
                    goto out;
                }
                bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
                VATTR_RETURN(vap, va_acl, facl);
            }
        }
        kauth_filesec_free(fsec);
    }
    /*
     * If someone gave us an unsolicited filesec, toss it.  We promise that
     * we're OK with a filesystem giving us anything back, but our callers
     * only expect what they asked for.
     */
    if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
        if (vap->va_acl != NULL)
            kauth_acl_free(vap->va_acl);
        VATTR_CLEAR_SUPPORTED(vap, va_acl);
    }

#if 0   /* enable when we have a filesystem only supporting UUIDs */
    /*
     * Handle the case where we need a UID/GID, but only have extended
     * security information.
     */
    if (VATTR_NOT_RETURNED(vap, va_uid) &&
        VATTR_IS_SUPPORTED(vap, va_uuuid) &&
        !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
        if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0)
            VATTR_RETURN(vap, va_uid, nuid);
    }
    if (VATTR_NOT_RETURNED(vap, va_gid) &&
        VATTR_IS_SUPPORTED(vap, va_guuid) &&
        !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
        if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0)
            VATTR_RETURN(vap, va_gid, ngid);
    }
#endif

    /*
     * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
     */
    if (VATTR_IS_ACTIVE(vap, va_uid)) {
        if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
            nuid = vap->va_uid;
        } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
            nuid = vp->v_mount->mnt_fsowner;
            if (nuid == KAUTH_UID_NONE)
                nuid = 99;
        } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
            nuid = vap->va_uid;
        } else {
            /* this will always be something sensible */
            nuid = vp->v_mount->mnt_fsowner;
        }
        if ((nuid == 99) && !vfs_context_issuser(ctx))
            nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
        VATTR_RETURN(vap, va_uid, nuid);
    }
    if (VATTR_IS_ACTIVE(vap, va_gid)) {
        if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
            ngid = vap->va_gid;
        } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
            ngid = vp->v_mount->mnt_fsgroup;
            if (ngid == KAUTH_GID_NONE)
                ngid = 99;
        } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
            ngid = vap->va_gid;
        } else {
            /* this will always be something sensible */
            ngid = vp->v_mount->mnt_fsgroup;
        }
        if ((ngid == 99) && !vfs_context_issuser(ctx))
            ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
        VATTR_RETURN(vap, va_gid, ngid);
    }

    /*
     * Synthesise some values that can be reasonably guessed.
     */
    if (!VATTR_IS_SUPPORTED(vap, va_iosize))
        VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);

    if (!VATTR_IS_SUPPORTED(vap, va_flags))
        VATTR_RETURN(vap, va_flags, 0);

    if (!VATTR_IS_SUPPORTED(vap, va_filerev))
        VATTR_RETURN(vap, va_filerev, 0);

    if (!VATTR_IS_SUPPORTED(vap, va_gen))
        VATTR_RETURN(vap, va_gen, 0);

    /*
     * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
     */
    if (!VATTR_IS_SUPPORTED(vap, va_data_size))
        VATTR_RETURN(vap, va_data_size, 0);

    /* do we want any of the possibly-computed values? */
    if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
        VATTR_IS_ACTIVE(vap, va_total_size) ||
        VATTR_IS_ACTIVE(vap, va_total_alloc)) {
        /* make sure f_bsize is valid */
        if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
            if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0)
                goto out;
        }

        /* default va_data_alloc from va_data_size */
        if (!VATTR_IS_SUPPORTED(vap, va_data_alloc))
            VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));

        /* default va_total_size from va_data_size */
        if (!VATTR_IS_SUPPORTED(vap, va_total_size))
            VATTR_RETURN(vap, va_total_size, vap->va_data_size);

        /* default va_total_alloc from va_total_size which is guaranteed at this point */
        if (!VATTR_IS_SUPPORTED(vap, va_total_alloc))
            VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
    }

    /*
     * If we don't have a change time, pull it from the modtime.
     */
    if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time))
        VATTR_RETURN(vap, va_change_time, vap->va_modify_time);

    /*
     * This is really only supported for the creation VNOPs, but since the field is there
     * we should populate it correctly.
     */
    VATTR_RETURN(vap, va_type, vp->v_type);

    /*
     * The fsid can be obtained from the mountpoint directly.
     */
    VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

out:
    return (error);
}
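
/*
 * Illustrative sketch (not part of the original file): how a kernel
 * client typically drives vnode_getattr() and why the fallback logic
 * above matters.  The caller marks the attributes it wants; anything
 * the filesystem could not supply directly is either synthesised above
 * or left unsupported, so the caller must check VATTR_IS_SUPPORTED()
 * before trusting a field.  'example_fetch_sizes' and its parameters
 * are hypothetical names, not identifiers from this file.
 */
#if 0   /* example only; never compiled */
static int
example_fetch_sizes(vnode_t example_vp, vfs_context_t example_ctx)
{
    struct vnode_attr va;
    int error;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_data_size);
    VATTR_WANTED(&va, va_total_alloc);  /* may be defaulted by the code above */

    if ((error = vnode_getattr(example_vp, &va, example_ctx)) != 0)
        return (error);

    /* va_total_alloc may have been computed from va_data_size and f_bsize */
    if (!VATTR_IS_SUPPORTED(&va, va_total_alloc))
        return (ENOTSUP);
    return (0);
}
#endif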
/*
 * Set the attributes on a vnode in a vnode context.
 *
 * Parameters:  vp          The vnode whose attributes to set.
 *              vap         A pointer to the attributes to set.
 *              ctx         The vnode context in which the
 *                          operation is to be attempted.
 *
 * Returns:     0           Success
 *
 * Notes:       The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *              The contents of the data area pointed to by 'vap' may be
 *              modified if the vnode is on a filesystem which has been
 *              mounted with ignore ownership flags, or by the underlying
 *              VFS itself, or by the fallback code, if the underlying VFS
 *              does not support ACL, UUID, or GUUID attributes directly.
 *
 * XXX:         We should enumerate the possible errno values here, and where
 *              in the code they originated.
 */
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
    int error, is_perm_change = 0;

    /*
     * Make sure the filesystem is mounted R/W.
     * If not, return an error.
     */
    if (vfs_isrdonly(vp->v_mount)) {
        error = EROFS;
        goto out;
    }
#if NAMEDSTREAMS
    /* For streams, va_data_size is the only settable attribute. */
    if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
        error = EPERM;
        goto out;
    }
#endif

    /*
     * If ownership is being ignored on this volume, we silently discard
     * ownership changes.
     */
    if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
        VATTR_CLEAR_ACTIVE(vap, va_uid);
        VATTR_CLEAR_ACTIVE(vap, va_gid);
    }

    if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)
        || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) {
        is_perm_change = 1;
    }

    /*
     * Make sure that extended security is enabled if we're going to try
     * to set any.
     */
    if (!vfs_extendedsecurity(vnode_mount(vp)) &&
        (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
        KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
        error = ENOTSUP;
        goto out;
    }

    error = VNOP_SETATTR(vp, vap, ctx);

    if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
        error = vnode_setattr_fallback(vp, vap, ctx);

#if CONFIG_FSE
    // only send a stat_changed event if this is more than
    // just an access or backup time update
    if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != VNODE_ATTR_BIT(va_backup_time))) {
        if (is_perm_change) {
            if (need_fsevent(FSE_CHOWN, vp)) {
                add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
            }
        } else if (need_fsevent(FSE_STAT_CHANGED, vp)) {
            add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
        }
    }
#endif

out:
    return (error);
}
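
/*
 * Illustrative sketch (not part of the original file): a typical
 * in-kernel caller of vnode_setattr().  Ownership bits set here would
 * be silently dropped on an MNT_IGNORE_OWNERSHIP volume, per the logic
 * above.  'example_chmod' and its parameters are hypothetical names.
 */
#if 0   /* example only; never compiled */
static int
example_chmod(vnode_t example_vp, mode_t new_mode, vfs_context_t example_ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, new_mode);
    /* vnode_setattr() dispatches to the FS and, if needed, the fallback */
    return (vnode_setattr(example_vp, &va, example_ctx));
}
#endif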
/*
 * Fallback for setting the attributes on a vnode in a vnode context.  This
 * function will attempt to store ACL, UUID, and GUID information utilizing
 * a read/modify/write operation against an EA used as a backing store for
 * the object.
 *
 * Parameters:  vp          The vnode whose attributes to set.
 *              vap         A pointer to the attributes to set.
 *              ctx         The vnode context in which the
 *                          operation is to be attempted.
 *
 * Returns:     0           Success
 *
 * Notes:       The kauth_filesec_t in 'vap', if any, is in host byte order,
 *              as are the fsec and lfsec, if they are used.
 *
 *              The contents of the data area pointed to by 'vap' may be
 *              modified to indicate that the attribute is supported for
 *              any given requested attribute.
 *
 * XXX:         We should enumerate the possible errno values here, and where
 *              in the code they originated.
 */
int
vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
    kauth_filesec_t fsec;
    kauth_acl_t facl;
    struct kauth_filesec lfsec;
    int error;

    error = 0;

    /*
     * Extended security fallback via extended attributes.
     *
     * Note that we do not free the filesec; the caller is expected to
     * do this.
     */
    if (VATTR_NOT_RETURNED(vap, va_acl) ||
        VATTR_NOT_RETURNED(vap, va_uuuid) ||
        VATTR_NOT_RETURNED(vap, va_guuid)) {
        VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");

        /*
         * Fail for file types that we don't permit extended security
         * to be set on.
         */
        if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
            VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
            error = EINVAL;
            goto out;
        }

        /*
         * If we don't have all the extended security items, we need
         * to fetch the existing data to perform a read-modify-write
         * operation.
         */
        fsec = NULL;
        if (!VATTR_IS_ACTIVE(vap, va_acl) ||
            !VATTR_IS_ACTIVE(vap, va_uuuid) ||
            !VATTR_IS_ACTIVE(vap, va_guuid)) {
            if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
                KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
                goto out;
            }
        }
        /* if we didn't get a filesec, use our local one */
        if (fsec == NULL) {
            KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
            fsec = &lfsec;
        } else {
            KAUTH_DEBUG("SETATTR - updating existing filesec");
        }
        /* find the ACL */
        facl = &fsec->fsec_acl;

        /* if we're using the local filesec, we need to initialise it */
        if (fsec == &lfsec) {
            fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
            fsec->fsec_owner = kauth_null_guid;
            fsec->fsec_group = kauth_null_guid;
            facl->acl_entrycount = KAUTH_FILESEC_NOACL;
            facl->acl_flags = 0;
        }

        /*
         * Update with the supplied attributes.
         */
        if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
            KAUTH_DEBUG("SETATTR - updating owner UUID");
            fsec->fsec_owner = vap->va_uuuid;
            VATTR_SET_SUPPORTED(vap, va_uuuid);
        }
        if (VATTR_IS_ACTIVE(vap, va_guuid)) {
            KAUTH_DEBUG("SETATTR - updating group UUID");
            fsec->fsec_group = vap->va_guuid;
            VATTR_SET_SUPPORTED(vap, va_guuid);
        }
        if (VATTR_IS_ACTIVE(vap, va_acl)) {
            if (vap->va_acl == NULL) {
                KAUTH_DEBUG("SETATTR - removing ACL");
                facl->acl_entrycount = KAUTH_FILESEC_NOACL;
            } else {
                KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
                facl = vap->va_acl;
            }
            VATTR_SET_SUPPORTED(vap, va_acl);
        }

        /*
         * If the filesec data is all invalid, we can just remove
         * the EA completely.
         */
        if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
            kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
            kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
            error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
            /* no attribute is ok, nothing to delete */
            if (error == ENOATTR)
                error = 0;
            VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
        } else {
            /* write the EA */
            error = vnode_set_filesec(vp, fsec, facl, ctx);
            VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
        }

        /* if we fetched a filesec, dispose of the buffer */
        if (fsec != &lfsec)
            kauth_filesec_free(fsec);
    }
out:
    return (error);
}
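
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * that stores no native ACLs can simply decline va_acl in its setattr
 * VNOP.  Because it never calls VATTR_SET_SUPPORTED(vap, va_acl),
 * vnode_setattr() sees the unsupported attribute and routes it through
 * the read/modify/write fallback above, which persists the filesec in
 * the KAUTH_FILESEC_XATTR extended attribute.  'examplefs_setattr' is
 * a hypothetical VNOP implementation, not part of this file.
 */
#if 0   /* example only; never compiled */
static int
examplefs_setattr(struct vnop_setattr_args *ap)
{
    struct vnode_attr *vap = ap->a_vap;

    if (VATTR_IS_ACTIVE(vap, va_mode)) {
        /* ... persist the mode natively here ... */
        VATTR_SET_SUPPORTED(vap, va_mode);
    }
    /* va_acl deliberately left unsupported: the fallback above handles it */
    return (0);
}
#endif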
/*
 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
 * event on a vnode.
 */
int
vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
{
    /* These are the same as the corresponding knotes, at least for now.  Cheating a little. */
    uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
            | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
    uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
            | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
    uint32_t knote_events = (events & knote_mask);

    /* Permissions are not explicitly part of the kqueue model */
    if (events & VNODE_EVENT_PERMS) {
        knote_events |= NOTE_ATTRIB;
    }

    /* Directory contents information just becomes NOTE_WRITE */
    if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
        knote_events |= NOTE_WRITE;
    }

    if (knote_events) {
        lock_vnode_and_post(vp, knote_events);
#if CONFIG_FSE
        if (vap != NULL) {
            create_fsevent_from_kevent(vp, events, vap);
        }
#endif
    }

    return 0;
}
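
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * that tracks its own events would report an attribute change through
 * vnode_notify().  vfs_get_notify_attributes() (below) activates the
 * attributes the notification machinery wants.  'examplefs_post_attrib_change'
 * is a hypothetical name.
 */
#if 0   /* example only; never compiled */
static void
examplefs_post_attrib_change(vnode_t vp, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    vfs_get_notify_attributes(&va);
    if (vnode_getattr(vp, &va, ctx) == 0)
        vnode_notify(vp, VNODE_EVENT_ATTRIB, &va);
}
#endif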
int
vnode_isdyldsharedcache(vnode_t vp)
{
    return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0);
}

/*
 * For a filesystem that isn't tracking its own vnode watchers:
 * check whether a vnode is being monitored.
 */
int
vnode_ismonitored(vnode_t vp)
{
    return (vp->v_knotes.slh_first != NULL);
}

/*
 * Initialize a struct vnode_attr and activate the attributes required
 * by the vnode_notify() call.
 */
int
vfs_get_notify_attributes(struct vnode_attr *vap)
{
    VATTR_INIT(vap);
    vap->va_active = VNODE_NOTIFY_ATTRS;
    return 0;
}

#if CONFIG_TRIGGERS
int
vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
{
    int error;
    mount_t mp;

    mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
    if (mp == NULL) {
        return ENOENT;
    }

    error = vfs_busy(mp, LK_NOWAIT);
    mount_iterdrop(mp);

    if (error != 0) {
        return EBUSY;
    }

    if (mp->mnt_triggercallback != NULL) {
        error = EBUSY;
        goto out;
    }

    mp->mnt_triggercallback = vtc;
    mp->mnt_triggerdata = data;
    mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);

out:
    vfs_unbusy(mp);
    return 0;
}
#endif /* CONFIG_TRIGGERS */
/*
 * Definition of vnode operations.
 */

#if 0
/*
 *#
 *#% lookup       dvp     L ? ?
 *#% lookup       vpp     - L -
 */
struct vnop_lookup_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t *a_vpp;
    struct componentname *a_cnp;
    vfs_context_t a_context;
};
#endif /* 0 */

/*
 * Returns:     0                       Success
 *      lock_fsnode:ENOENT              No such file or directory [only for VFS
 *                                       that is not thread safe & vnode is
 *                                       currently being/has been terminated]
 *      <vfs_lookup>:ENAMETOOLONG
 *      <vfs_lookup>:ENOENT
 *      <vfs_lookup>:EJUSTRETURN
 *      <vfs_lookup>:EPERM
 *      <vfs_lookup>:EISDIR
 *      <vfs_lookup>:ENOTDIR
 *
 * Note:        The return codes from the underlying VFS's lookup routine can't
 *              be fully enumerated here, since third party VFS authors may not
 *              limit their error returns to the ones documented here, even
 *              though this may result in some programs functioning incorrectly.
 *
 *              The return codes documented above are those which may currently
 *              be returned by HFS from hfs_lookup, not including additional
 *              error codes which may be propagated from underlying routines.
 */
errno_t
VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
{
    int _err;
    struct vnop_lookup_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_lookup_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = cnp;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        if ( (cnp->cn_flags & ISLASTCN) ) {
            if ( (cnp->cn_flags & LOCKPARENT) ) {
                if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
                    /*
                     * leave the fsnode lock held on
                     * the directory, but restore the funnel...
                     * also indicate that we need to drop the
                     * fsnode_lock when we're done with the
                     * system call processing for this path
                     */
                    cnp->cn_flags |= FSNODELOCKHELD;
                    (void) thread_funnel_set(kernel_flock, funnel_state);
                    return (_err);
                }
            }
        }
        unlock_fsnode(dvp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
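
/*
 * Illustrative sketch (not part of the original file): the dispatch
 * pattern every wrapper in this file follows.  Each VNOP_* packs its
 * arguments into an args structure headed by a vnodeop_desc and calls
 * through the vnode's v_op table at that descriptor's offset, so a
 * filesystem only supplies the operations it implements.
 * 'example_dispatch' is a hypothetical name.
 */
#if 0   /* example only; never compiled */
static int
example_dispatch(vnode_t vp, struct vnodeop_desc *desc, void *ap)
{
    /* same pattern as: _err = (*vp->v_op[desc->vdesc_offset])(&a); */
    return (*vp->v_op[desc->vdesc_offset])(ap);
}
#endif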
#if 0
struct vnop_compound_open_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t *a_vpp;
    struct componentname *a_cnp;
    int32_t a_flags;
    int32_t a_fmode;
    struct vnode_attr *a_vap;
    vfs_context_t a_context;
};
#endif /* 0 */

int
VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_compound_open_args a;
    int did_create = 0;
    int want_create;
    uint32_t tmp_status = 0;
    struct componentname *cnp = &ndp->ni_cnd;

    want_create = (flags & VNOP_COMPOUND_OPEN_DO_CREATE);

    a.a_desc = &vnop_compound_open_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;      /* Could be NULL */
    a.a_cnp = cnp;
    a.a_flags = flags;
    a.a_fmode = fmode;
    a.a_status = (statusp != NULL) ? statusp : &tmp_status;
    a.a_vap = vap;
    a.a_context = ctx;
    a.a_open_create_authorizer = vn_authorize_create;
    a.a_open_existing_authorizer = vn_authorize_open_existing;
    a.a_reserved = NULL;

    if (dvp == NULLVP) {
        panic("No dvp?");
    }
    if (want_create && !vap) {
        panic("Want create, but no vap?");
    }
    if (!want_create && vap) {
        panic("Don't want create, but have a vap?");
    }

    _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);

    did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);

    if (did_create && !want_create) {
        panic("Filesystem did a create, even though none was requested?");
    }

    if (did_create) {
        if (!NATIVE_XATTR(dvp)) {
            /*
             * Remove stale Apple Double file (if any).
             */
            xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
        }

        /* On create, provide kqueue notification */
        post_event_if_success(dvp, _err, NOTE_WRITE);
    }

    lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
#if 0 /* FSEvents... */
    if (*vpp && _err && _err != EKEEPLOOKING) {
        vnode_put(*vpp);
        *vpp = NULLVP;
    }
#endif

    return (_err);
}
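
/*
 * Illustrative sketch (not part of the original file): a caller of
 * VNOP_COMPOUND_OPEN() can distinguish "opened existing" from
 * "created" by inspecting the status word, just as the wrapper above
 * does when validating the filesystem's behaviour.
 * 'example_open_or_create' is a hypothetical name.
 */
#if 0   /* example only; never compiled */
static int
example_open_or_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
    int32_t fmode, struct vnode_attr *vap, vfs_context_t ctx)
{
    uint32_t status = 0;
    int error;

    error = VNOP_COMPOUND_OPEN(dvp, vpp, ndp, VNOP_COMPOUND_OPEN_DO_CREATE,
        fmode, &status, vap, ctx);
    if (error == 0 && (status & COMPOUND_OPEN_STATUS_DID_CREATE)) {
        /* the filesystem created the file as part of the open */
    }
    return (error);
}
#endif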
#if 0
struct vnop_create_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t *a_vpp;
    struct componentname *a_cnp;
    struct vnode_attr *a_vap;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_CREATE(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_create_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_create_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = cnp;
    a.a_vap = vap;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
    if (_err == 0 && !NATIVE_XATTR(dvp)) {
        /*
         * Remove stale Apple Double file (if any).
         */
        xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
    }

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(dvp, &funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(dvp, _err, NOTE_WRITE);

    return (_err);
}
#if 0
/*
 *#
 *#% whiteout     dvp     L L L
 *#% whiteout     cnp     - - -
 *#% whiteout     flag    - - -
 *#
 */
struct vnop_whiteout_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    struct componentname *a_cnp;
    int a_flags;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_WHITEOUT(vnode_t dvp, struct componentname *cnp, int flags, vfs_context_t ctx)
{
    int _err;
    struct vnop_whiteout_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_whiteout_desc;
    a.a_dvp = dvp;
    a.a_cnp = cnp;
    a.a_flags = flags;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(dvp, &funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(dvp, _err, NOTE_WRITE);

    return (_err);
}
#if 0
struct vnop_mknod_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t *a_vpp;
    struct componentname *a_cnp;
    struct vnode_attr *a_vap;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_MKNOD(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_mknod_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_mknod_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = cnp;
    a.a_vap = vap;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(dvp, &funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(dvp, _err, NOTE_WRITE);

    return (_err);
}
#if 0
struct vnop_open_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_mode;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
{
    int _err;
    struct vnop_open_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    if (ctx == NULL) {
        ctx = vfs_context_current();
    }
    a.a_desc = &vnop_open_desc;
    a.a_vp = vp;
    a.a_mode = mode;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            if ( (_err = lock_fsnode(vp, NULL)) ) {
                (void) thread_funnel_set(kernel_flock, funnel_state);
                return (_err);
            }
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            unlock_fsnode(vp, NULL);
        }
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
struct vnop_close_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_fflag;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
{
    int _err;
    struct vnop_close_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    if (ctx == NULL) {
        ctx = vfs_context_current();
    }
    a.a_desc = &vnop_close_desc;
    a.a_vp = vp;
    a.a_fflag = fflag;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            if ( (_err = lock_fsnode(vp, NULL)) ) {
                (void) thread_funnel_set(kernel_flock, funnel_state);
                return (_err);
            }
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            unlock_fsnode(vp, NULL);
        }
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
struct vnop_access_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_action;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
{
    int _err;
    struct vnop_access_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    if (ctx == NULL) {
        ctx = vfs_context_current();
    }
    a.a_desc = &vnop_access_desc;
    a.a_vp = vp;
    a.a_action = action;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *#% getattr      vp      = = =
 *#
 */
struct vnop_getattr_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    struct vnode_attr *a_vap;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_GETATTR(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_getattr_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_getattr_desc;
    a.a_vp = vp;
    a.a_vap = vap;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *#% setattr      vp      L L L
 *#
 */
struct vnop_setattr_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    struct vnode_attr *a_vap;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_SETATTR(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_setattr_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_setattr_desc;
    a.a_vp = vp;
    a.a_vap = vap;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);

    /*
     * Shadow uid/gid/mod change to extended attribute file.
     */
    if (_err == 0 && !NATIVE_XATTR(vp)) {
        struct vnode_attr va;
        int change = 0;

        VATTR_INIT(&va);
        if (VATTR_IS_ACTIVE(vap, va_uid)) {
            VATTR_SET(&va, va_uid, vap->va_uid);
            change = 1;
        }
        if (VATTR_IS_ACTIVE(vap, va_gid)) {
            VATTR_SET(&va, va_gid, vap->va_gid);
            change = 1;
        }
        if (VATTR_IS_ACTIVE(vap, va_mode)) {
            VATTR_SET(&va, va_mode, vap->va_mode);
            change = 1;
        }
        if (change) {
            vnode_t dvp;
            const char *vname;

            dvp = vnode_getparent(vp);
            vname = vnode_getname(vp);

            if (vname != NULL) {
                xattrfile_setattr(dvp, vname, &va, ctx);
                vnode_putname(vname);
            }
            if (dvp != NULLVP)
                vnode_put(dvp);
        }
    }

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    /*
     * If we have changed any of the things about the file that are likely
     * to result in changes to authorization results, blow the vnode auth
     * cache
     */
    if (_err == 0 && (
        VATTR_IS_SUPPORTED(vap, va_mode) ||
        VATTR_IS_SUPPORTED(vap, va_uid) ||
        VATTR_IS_SUPPORTED(vap, va_gid) ||
        VATTR_IS_SUPPORTED(vap, va_flags) ||
        VATTR_IS_SUPPORTED(vap, va_acl) ||
        VATTR_IS_SUPPORTED(vap, va_uuuid) ||
        VATTR_IS_SUPPORTED(vap, va_guuid))) {
        vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);

#if NAMEDSTREAMS
        if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
            vnode_t svp;
            if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
                vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
                vnode_put(svp);
            }
        }
#endif /* NAMEDSTREAMS */
    }

    post_event_if_success(vp, _err, NOTE_ATTRIB);

    return (_err);
}
#if 0
struct vnop_read_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    struct uio *a_uio;
    int a_ioflag;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_READ(vnode_t vp, struct uio *uio, int ioflag, vfs_context_t ctx)
{
    int _err;
    struct vnop_read_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    if (ctx == NULL) {
        ctx = vfs_context_current();
    }

    a.a_desc = &vnop_read_desc;
    a.a_vp = vp;
    a.a_uio = uio;
    a.a_ioflag = ioflag;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            if ( (_err = lock_fsnode(vp, NULL)) ) {
                (void) thread_funnel_set(kernel_flock, funnel_state);
                return (_err);
            }
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            unlock_fsnode(vp, NULL);
        }
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
struct vnop_write_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    struct uio *a_uio;
    int a_ioflag;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_WRITE(vnode_t vp, struct uio *uio, int ioflag, vfs_context_t ctx)
{
    int _err;
    struct vnop_write_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    if (ctx == NULL) {
        ctx = vfs_context_current();
    }

    a.a_desc = &vnop_write_desc;
    a.a_vp = vp;
    a.a_uio = uio;
    a.a_ioflag = ioflag;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            if ( (_err = lock_fsnode(vp, NULL)) ) {
                (void) thread_funnel_set(kernel_flock, funnel_state);
                return (_err);
            }
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            unlock_fsnode(vp, NULL);
        }
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(vp, _err, NOTE_WRITE);

    return (_err);
}
#if 0
struct vnop_ioctl_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    u_long a_command;
    caddr_t a_data;
    int a_fflag;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
{
    int _err;
    struct vnop_ioctl_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    if (ctx == NULL) {
        ctx = vfs_context_current();
    }

    /*
     * This check should probably have been put in the TTY code instead...
     *
     * We have to be careful about what we assume during startup and shutdown.
     * We have to be able to use the root filesystem's device vnode even when
     * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
     * structure.  If there is no data pointer, it doesn't matter whether
     * the device is 64-bit ready.  Any command (like DKIOCSYNCHRONIZECACHE)
     * which passes NULL for its data pointer can therefore be used during
     * mount or unmount of the root filesystem.
     *
     * Depending on what root filesystems need to do during mount/unmount, we
     * may need to loosen this check again in the future.
     */
    if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
        if (data != NULL && !vnode_vfs64bitready(vp)) {
            return (ENOTTY);
        }
    }

    a.a_desc = &vnop_ioctl_desc;
    a.a_vp = vp;
    a.a_command = command;
    a.a_data = data;
    a.a_fflag = fflag;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            if ( (_err = lock_fsnode(vp, NULL)) ) {
                (void) thread_funnel_set(kernel_flock, funnel_state);
                return (_err);
            }
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            unlock_fsnode(vp, NULL);
        }
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
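
/*
 * Illustrative sketch (not part of the original file): the consequence
 * of the 64-bit readiness check above.  An ioctl that carries no data
 * pointer is always allowed through, which is what lets cache-flush
 * style commands work on the root device while devfs is absent.
 * 'example_flush' is a hypothetical name.
 */
#if 0   /* example only; never compiled */
static int
example_flush(vnode_t devvp, vfs_context_t ctx)
{
    /* NULL data pointer: permitted even before the FS declares 64-bit readiness */
    return (VNOP_IOCTL(devvp, DKIOCSYNCHRONIZECACHE, NULL, 0, ctx));
}
#endif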
#if 0
struct vnop_select_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_which;
    int a_fflags;
    void *a_wql;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_SELECT(vnode_t vp, int which, int fflags, void *wql, vfs_context_t ctx)
{
    int _err;
    struct vnop_select_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    if (ctx == NULL) {
        ctx = vfs_context_current();
    }
    a.a_desc = &vnop_select_desc;
    a.a_vp = vp;
    a.a_which = which;
    a.a_fflags = fflags;
    a.a_wql = wql;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            if ( (_err = lock_fsnode(vp, NULL)) ) {
                (void) thread_funnel_set(kernel_flock, funnel_state);
                return (_err);
            }
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
            unlock_fsnode(vp, NULL);
        }
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *#% exchange     fvp     L L L
 *#% exchange     tvp     L L L
 *#
 */
struct vnop_exchange_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_fvp;
    vnode_t a_tvp;
    int a_options;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
{
    int _err;
    struct vnop_exchange_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
    vnode_t lock_first = NULL, lock_second = NULL;
#endif /* __LP64__ */

    a.a_desc = &vnop_exchange_desc;
    a.a_fvp = fvp;
    a.a_tvp = tvp;
    a.a_options = options;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(fvp);
    if (!thread_safe) {
        /*
         * Lock in vnode address order to avoid deadlocks
         */
        if (fvp < tvp) {
            lock_first  = fvp;
            lock_second = tvp;
        } else {
            lock_first  = tvp;
            lock_second = fvp;
        }
        if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
            return (_err);
        }
        if ( (_err = lock_fsnode(lock_second, NULL)) ) {
            unlock_fsnode(lock_first, &funnel_state);
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(lock_second, NULL);
        unlock_fsnode(lock_first, &funnel_state);
    }
#endif /* __LP64__ */

    /* Don't post NOTE_WRITE because file descriptors follow the data ... */
    post_event_if_success(fvp, _err, NOTE_ATTRIB);
    post_event_if_success(tvp, _err, NOTE_ATTRIB);

    return (_err);
}
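
/*
 * Illustrative sketch (not part of the original file): the address-order
 * lock discipline used above (and again in VNOP_RENAME) generalizes to
 * any pair of locks.  Always acquiring the lower address first means two
 * threads locking the same pair can never deadlock against each other.
 * 'example_lock_pair' is a hypothetical name.
 */
#if 0   /* example only; never compiled */
static void
example_lock_pair(vnode_t a, vnode_t b, int *funnel_state)
{
    vnode_t first  = (a < b) ? a : b;
    vnode_t second = (a < b) ? b : a;

    (void) lock_fsnode(first, funnel_state);
    if (second != first)
        (void) lock_fsnode(second, NULL);
}
#endif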
#if 0
struct vnop_revoke_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_flags;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
{
    struct vnop_revoke_args a;
    int _err;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_revoke_desc;
    a.a_vp = vp;
    a.a_flags = flags;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
struct vnop_mmap_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_fflags;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
{
    int _err;
    struct vnop_mmap_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_mmap_desc;
    a.a_vp = vp;
    a.a_fflags = fflags;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *# mnomap - vp U U U
 *#
 */
struct vnop_mnomap_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
{
    int _err;
    struct vnop_mnomap_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_mnomap_desc;
    a.a_vp = vp;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
struct vnop_fsync_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_waitfor;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
{
    struct vnop_fsync_args a;
    int _err;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_fsync_desc;
    a.a_vp = vp;
    a.a_waitfor = waitfor;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *#% remove       dvp     L U U
 *#% remove       vp      L U U
 *#
 */
struct vnop_remove_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t a_vp;
    struct componentname *a_cnp;
    int a_flags;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname *cnp, int flags, vfs_context_t ctx)
{
    int _err;
    struct vnop_remove_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_remove_desc;
    a.a_dvp = dvp;
    a.a_vp = vp;
    a.a_cnp = cnp;
    a.a_flags = flags;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);

    if (_err == 0) {
        vnode_setneedinactive(vp);

        if ( !(NATIVE_XATTR(dvp)) ) {
            /*
             * Remove any associated extended attribute file (._ AppleDouble file).
             */
            xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
        }
    }

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
    post_event_if_success(dvp, _err, NOTE_WRITE);

    return (_err);
}
int
VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_compound_remove_args a;
    int no_vp = (*vpp == NULLVP);

    a.a_desc = &vnop_compound_remove_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = &ndp->ni_cnd;
    a.a_flags = flags;
    a.a_vap = vap;
    a.a_context = ctx;
    a.a_remove_authorizer = vn_authorize_unlink;

    _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
    if (_err == 0) {
        vnode_setneedinactive(*vpp);

        if ( !(NATIVE_XATTR(dvp)) ) {
            /*
             * Remove any associated extended attribute file (._ AppleDouble file).
             */
            xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
        }
    }

    post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
    post_event_if_success(dvp, _err, NOTE_WRITE);

    if (no_vp) {
        lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
        if (*vpp && _err && _err != EKEEPLOOKING) {
            vnode_put(*vpp);
            *vpp = NULLVP;
        }
    }

    //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);

    return (_err);
}
#if 0
struct vnop_link_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    vnode_t a_tdvp;
    struct componentname *a_cnp;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname *cnp, vfs_context_t ctx)
{
    int _err;
    struct vnop_link_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    /*
     * For file systems with non-native extended attributes,
     * disallow linking to an existing "._" Apple Double file.
     */
    if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
        const char *vname;

        vname = vnode_getname(vp);
        if (vname != NULL) {
            _err = 0;
            if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
                _err = EPERM;
            }
            vnode_putname(vname);
            if (_err)
                return (_err);
        }
    }
    a.a_desc = &vnop_link_desc;
    a.a_vp = vp;
    a.a_tdvp = tdvp;
    a.a_cnp = cnp;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(vp, _err, NOTE_LINK);
    post_event_if_success(tdvp, _err, NOTE_WRITE);

    return (_err);
}
errno_t
vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
          struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
          uint32_t flags, vfs_context_t ctx)
{
    int _err;
    vnode_t src_attr_vp = NULLVP;
    vnode_t dst_attr_vp = NULLVP;
    struct nameidata fromnd;
    struct nameidata tond;
    char smallname1[48];
    char smallname2[48];
    char *xfromname = NULL;
    char *xtoname = NULL;
    int batched;

    batched = vnode_compound_rename_available(fdvp);

#ifndef __LP64__
    vnode_t fdvp_unsafe = (THREAD_SAFE_FS(fdvp) ? NULLVP : fdvp);
#endif /* __LP64__ */

    if (!batched) {
        if (*fvpp == NULLVP)
            panic("Not batched, and no fvp?");
    }

    /*
     * We need to preflight any potential AppleDouble file for the source file
     * before doing the rename operation, since we could potentially be doing
     * this operation on a network filesystem, and would end up duplicating
     * the work.  Also, save the source and destination names.  Skip it if the
     * source has a "._" prefix.
     */
    if (!NATIVE_XATTR(fdvp) &&
        !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
        size_t len;
        int error;

        /* Get source attribute file name. */
        len = fcnp->cn_namelen + 3;
        if (len > sizeof(smallname1)) {
            MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK);
        } else {
            xfromname = &smallname1[0];
        }
        strlcpy(xfromname, "._", min(sizeof smallname1, len));
        strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen);
        xfromname[len-1] = '\0';

        /* Get destination attribute file name. */
        len = tcnp->cn_namelen + 3;
        if (len > sizeof(smallname2)) {
            MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK);
        } else {
            xtoname = &smallname2[0];
        }
        strlcpy(xtoname, "._", min(sizeof smallname2, len));
        strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen);
        xtoname[len-1] = '\0';

        /*
         * Look up source attribute file, keep reference on it if exists.
         * Note that we do the namei with the nameiop of RENAME, which is different than
         * in the rename syscall. It's OK if the source file does not exist, since this
         * is only for AppleDouble files.
         */
        if (xfromname != NULL) {
            NDINIT(&fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
                   UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
            fromnd.ni_dvp = fdvp;
            error = namei(&fromnd);

            /*
             * If there was an error looking up source attribute file,
             * we'll behave as if it didn't exist.
             */
            if (error == 0) {
                if (fromnd.ni_vp) {
                    /* src_attr_vp indicates need to call vnode_put / nameidone later */
                    src_attr_vp = fromnd.ni_vp;

                    if (fromnd.ni_vp->v_type != VREG) {
                        src_attr_vp = NULLVP;
                        vnode_put(fromnd.ni_vp);
                    }
                }
                /*
                 * Either we got an invalid vnode type (not a regular file) or the namei lookup
                 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
                 * have a vnode here, so we drop our namei buffer for the source attribute file
                 */
                if (src_attr_vp == NULLVP) {
                    nameidone(&fromnd);
                }
            }
        }
    }

    if (batched) {
        _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
        if (_err != 0) {
            printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
        }
    } else {
        _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
    }

#if CONFIG_MACF
    if (_err == 0) {
        mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
    }
#endif

    /*
     * Rename any associated extended attribute file (._ AppleDouble file).
     */
    if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
        int error = 0;

        /*
         * Get destination attribute file vnode.
         * Note that tdvp already has an iocount reference. Make sure to check that we
         * get a valid vnode from namei.
         */
        NDINIT(&tond, RENAME, OP_RENAME,
               NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
               CAST_USER_ADDR_T(xtoname), ctx);
        tond.ni_dvp = tdvp;
        error = namei(&tond);

        if (error)
            goto out;

        if (tond.ni_vp) {
            dst_attr_vp = tond.ni_vp;
        }

        if (src_attr_vp) {
            if (batched) {
                error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd.ni_cnd, NULL,
                        tdvp, &dst_attr_vp, &tond.ni_cnd, NULL,
                        0, ctx);
            } else {
                error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd.ni_cnd,
                        tdvp, dst_attr_vp, &tond.ni_cnd, ctx);
            }

            /* kevent notifications for moving resource files
             * _err is zero if we're here, so no need to notify directories, code
             * below will do that.  only need to post the rename on the source and
             * possibly a delete on the dest
             */
            post_event_if_success(src_attr_vp, error, NOTE_RENAME);
            if (dst_attr_vp) {
                post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
            }
        } else if (dst_attr_vp) {
            /*
             * Just delete destination attribute file vnode if it exists, since
             * we didn't have a source attribute file.
             * Note that tdvp already has an iocount reference.
             */
            struct vnop_remove_args args;

            args.a_desc    = &vnop_remove_desc;
            args.a_dvp     = tdvp;
            args.a_vp      = dst_attr_vp;
            args.a_cnp     = &tond.ni_cnd;
            args.a_flags   = 0;
            args.a_context = ctx;

#ifndef __LP64__
            if (fdvp_unsafe != NULLVP)
                error = lock_fsnode(dst_attr_vp, NULL);
#endif /* __LP64__ */
            if (error == 0) {
                error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);

#ifndef __LP64__
                if (fdvp_unsafe != NULLVP)
                    unlock_fsnode(dst_attr_vp, NULL);
#endif /* __LP64__ */

                if (error == 0)
                    vnode_setneedinactive(dst_attr_vp);
            }

            /* kevent notification for deleting the destination's attribute file
             * if it existed.  Only need to post the delete on the destination, since
             * the code below will handle the directories.
             */
            post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
        }
    }
out:
    if (src_attr_vp) {
        vnode_put(src_attr_vp);
        nameidone(&fromnd);
    }
    if (dst_attr_vp) {
        vnode_put(dst_attr_vp);
        nameidone(&tond);
    }
    if (xfromname && xfromname != &smallname1[0]) {
        FREE(xfromname, M_TEMP);
    }
    if (xtoname && xtoname != &smallname2[0]) {
        FREE(xtoname, M_TEMP);
    }

    return (_err);
}
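
/*
 * Illustrative sketch (not part of the original file): the AppleDouble
 * sidecar naming rule used above.  For an entry "foo", the companion
 * attribute file is "._foo"; the buffer needs namelen + 3 bytes
 * ("._" plus the terminating NUL).  'example_ad_name' is a
 * hypothetical name.
 */
#if 0   /* example only; never compiled */
static size_t
example_ad_name(char *buf, size_t bufsize, const char *name, size_t namelen)
{
    /* mirrors: strlcpy(buf, "._", ...); strncat(buf, name, namelen); */
    size_t len = namelen + 3;

    if (bufsize < len)
        return (len);   /* caller must supply a larger buffer */
    strlcpy(buf, "._", bufsize);
    strncat(buf, name, namelen);
    buf[len - 1] = '\0';
    return (len);
}
#endif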
#if 0
/*
 *#
 *#% rename       fdvp    U U U
 *#% rename       fvp     U U U
 *#% rename       tdvp    L U U
 *#% rename       tvp     X U U
 *#
 */
struct vnop_rename_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_fdvp;
    vnode_t a_fvp;
    struct componentname *a_fcnp;
    vnode_t a_tdvp;
    vnode_t a_tvp;
    struct componentname *a_tcnp;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
            struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
            vfs_context_t ctx)
{
    int _err = 0;
    int events;
    struct vnop_rename_args a;
#ifndef __LP64__
    int funnel_state = 0;
    vnode_t lock_first = NULL, lock_second = NULL;
    vnode_t fdvp_unsafe = NULLVP;
    vnode_t tdvp_unsafe = NULLVP;
#endif /* __LP64__ */

    a.a_desc = &vnop_rename_desc;
    a.a_fdvp = fdvp;
    a.a_fvp = fvp;
    a.a_fcnp = fcnp;
    a.a_tdvp = tdvp;
    a.a_tvp = tvp;
    a.a_tcnp = tcnp;
    a.a_context = ctx;

#ifndef __LP64__
    if (!THREAD_SAFE_FS(fdvp))
        fdvp_unsafe = fdvp;
    if (!THREAD_SAFE_FS(tdvp))
        tdvp_unsafe = tdvp;

    if (fdvp_unsafe != NULLVP) {
        /*
         * Lock parents in vnode address order to avoid deadlocks;
         * note that it's possible for the fdvp to be unsafe,
         * but the tdvp to be safe because tvp could be a directory
         * in the root of a filesystem... in that case, tdvp is in
         * the filesystem that this root is mounted on
         */
        if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
            lock_first  = fdvp_unsafe;
            lock_second = NULL;
        } else if (fdvp_unsafe < tdvp_unsafe) {
            lock_first  = fdvp_unsafe;
            lock_second = tdvp_unsafe;
        } else {
            lock_first  = tdvp_unsafe;
            lock_second = fdvp_unsafe;
        }
        if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
            return (_err);

        if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
            unlock_fsnode(lock_first, &funnel_state);
            return (_err);
        }

        /*
         * Lock both children in vnode address order to avoid deadlocks
         */
        if (tvp == NULL || tvp == fvp) {
            lock_first  = fvp;
            lock_second = NULL;
        } else if (fvp < tvp) {
            lock_first  = fvp;
            lock_second = tvp;
        } else {
            lock_first  = tvp;
            lock_second = fvp;
        }
        if ( (_err = lock_fsnode(lock_first, NULL)) )
            goto out1;

        if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
            unlock_fsnode(lock_first, NULL);
            goto out1;
        }
    }
#endif /* __LP64__ */

    /* do the rename of the main file. */
    _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (fdvp_unsafe != NULLVP) {
        if (lock_second != NULL)
            unlock_fsnode(lock_second, NULL);
        unlock_fsnode(lock_first, NULL);
    }
#endif /* __LP64__ */

    if (_err == 0) {
        if (tvp && tvp != fvp)
            vnode_setneedinactive(tvp);
    }

#ifndef __LP64__
out1:
    if (fdvp_unsafe != NULLVP) {
        if (tdvp_unsafe != NULLVP)
            unlock_fsnode(tdvp_unsafe, NULL);
        unlock_fsnode(fdvp_unsafe, &funnel_state);
    }
#endif /* __LP64__ */

    /* Wrote at least one directory.  If transplanted a dir, also changed link counts */
    if (0 == _err) {
        events = NOTE_WRITE;
        if (vnode_isdir(fvp)) {
            /* Link count on dir changed only if we are moving a dir and...
             *  --Moved to new dir, not overwriting there
             *  --Kept in same dir and DID overwrite
             */
            if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
                events |= NOTE_LINK;
            }
        }

        lock_vnode_and_post(fdvp, events);
        if (fdvp != tdvp) {
            lock_vnode_and_post(tdvp, events);
        }

        /* If you're replacing the target, post a deletion for it */
        if (tvp) {
            lock_vnode_and_post(tvp, NOTE_DELETE);
        }

        lock_vnode_and_post(fvp, NOTE_RENAME);
    }

    return (_err);
}
int
VNOP_COMPOUND_RENAME(
        struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
        struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
        uint32_t flags, vfs_context_t ctx)
{
    int _err = 0;
    int events;
    struct vnop_compound_rename_args a;
    int no_fvp, no_tvp;

    no_fvp = (*fvpp) == NULLVP;
    no_tvp = (*tvpp) == NULLVP;

    a.a_desc = &vnop_compound_rename_desc;

    a.a_fdvp = fdvp;
    a.a_fvpp = fvpp;
    a.a_fcnp = fcnp;
    a.a_fvap = fvap;

    a.a_tdvp = tdvp;
    a.a_tvpp = tvpp;
    a.a_tcnp = tcnp;
    a.a_tvap = tvap;

    a.a_flags = flags;
    a.a_context = ctx;
    a.a_rename_authorizer = vn_authorize_rename;
    a.a_reserved = NULL;

    /* do the rename of the main file. */
    _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);

    if (_err == 0) {
        if (*tvpp && *tvpp != *fvpp)
            vnode_setneedinactive(*tvpp);
    }

    /* Wrote at least one directory.  If transplanted a dir, also changed link counts */
    if (0 == _err && *fvpp != *tvpp) {
        if (!*fvpp) {
            panic("No fvpp after compound rename?");
        }

        events = NOTE_WRITE;
        if (vnode_isdir(*fvpp)) {
            /* Link count on dir changed only if we are moving a dir and...
             *  --Moved to new dir, not overwriting there
             *  --Kept in same dir and DID overwrite
             */
            if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
                events |= NOTE_LINK;
            }
        }

        lock_vnode_and_post(fdvp, events);
        if (fdvp != tdvp) {
            lock_vnode_and_post(tdvp, events);
        }

        /* If you're replacing the target, post a deletion for it */
        if (*tvpp) {
            lock_vnode_and_post(*tvpp, NOTE_DELETE);
        }

        lock_vnode_and_post(*fvpp, NOTE_RENAME);
    }

    if (no_fvp) {
        lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
    }
    if (no_tvp && *tvpp != NULLVP) {
        lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
    }

    if (_err && _err != EKEEPLOOKING) {
        if (*fvpp) {
            vnode_put(*fvpp);
            *fvpp = NULLVP;
        }
        if (*tvpp) {
            vnode_put(*tvpp);
            *tvpp = NULLVP;
        }
    }

    return (_err);
}
int
vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
         struct vnode_attr *vap, vfs_context_t ctx)
{
    if (ndp->ni_cnd.cn_nameiop != CREATE) {
        panic("Non-CREATE nameiop in vn_mkdir()?");
    }

    if (vnode_compound_mkdir_available(dvp)) {
        return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
    } else {
        return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
    }
}
#if 0
struct vnop_mkdir_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t *a_vpp;
    struct componentname *a_cnp;
    struct vnode_attr *a_vap;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
           struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_mkdir_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_mkdir_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = cnp;
    a.a_vap = vap;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
    if (_err == 0 && !NATIVE_XATTR(dvp)) {
        /*
         * Remove stale Apple Double file (if any).
         */
        xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
    }

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(dvp, &funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

    return (_err);
}
int
VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
                    struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_compound_mkdir_args a;

    a.a_desc = &vnop_compound_mkdir_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = &ndp->ni_cnd;
    a.a_vap = vap;
    a.a_flags = 0;
    a.a_context = ctx;
    a.a_mkdir_authorizer = vn_authorize_mkdir;
    a.a_reserved = NULL;

    _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
    if (_err == 0 && !NATIVE_XATTR(dvp)) {
        /*
         * Remove stale Apple Double file (if any).
         */
        xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
    }

    post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

    lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
    if (*vpp && _err && _err != EKEEPLOOKING) {
        vnode_put(*vpp);
        *vpp = NULLVP;
    }

    return (_err);
}
int
vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
{
    if (vnode_compound_rmdir_available(dvp)) {
        return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
    } else {
        if (*vpp == NULLVP) {
            panic("NULL vp, but not a compound VNOP?");
        }
        if (vap != NULL) {
            panic("Non-NULL vap, but not a compound VNOP?");
        }
        return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
    }
}
#if 0
struct vnop_rmdir_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t a_vp;
    struct componentname *a_cnp;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
{
    int _err;
    struct vnop_rmdir_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_rmdir_desc;
    a.a_dvp = dvp;
    a.a_vp = vp;
    a.a_cnp = cnp;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);

    if (_err == 0) {
        vnode_setneedinactive(vp);

        if ( !(NATIVE_XATTR(dvp)) ) {
            /*
             * Remove any associated extended attribute file (._ AppleDouble file).
             */
            xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
        }
    }

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
    post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
    post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

    return (_err);
}
int
VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
                    struct vnode_attr *vap, vfs_context_t ctx)
{
    int _err;
    struct vnop_compound_rmdir_args a;
    int no_vp;

    a.a_desc = &vnop_compound_rmdir_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = &ndp->ni_cnd;
    a.a_vap = vap;
    a.a_flags = 0;
    a.a_context = ctx;
    a.a_rmdir_authorizer = vn_authorize_rmdir;
    a.a_reserved = NULL;

    no_vp = (*vpp == NULLVP);

    _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
    if (_err == 0 && !NATIVE_XATTR(dvp)) {
        /*
         * Remove stale Apple Double file (if any).
         */
        xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
    }

    if (*vpp) {
        post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
    }
    post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

    if (no_vp) {
        lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);

#if 0 /* Removing orphaned ._ files requires a vp.... */
        if (*vpp && _err && _err != EKEEPLOOKING) {
            vnode_put(*vpp);
            *vpp = NULLVP;
        }
#endif /* 0 */
    }

    return (_err);
}
/*
 * Remove a ._ AppleDouble file
 */
#define AD_STALE_SECS  (180)
static void
xattrfile_remove(vnode_t dvp, const char *basename, vfs_context_t ctx, int force)
{
    vnode_t xvp;
    struct nameidata nd;
    char smallname[64];
    char *filename = NULL;
    size_t len;

    if ((basename == NULL) || (basename[0] == '\0') ||
        (basename[0] == '.' && basename[1] == '_')) {
        return;
    }
    filename = &smallname[0];
    len = snprintf(filename, sizeof(smallname), "._%s", basename);
    if (len >= sizeof(smallname)) {
        len++;  /* snprintf result doesn't include '\0' */
        MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
        len = snprintf(filename, len, "._%s", basename);
    }
    NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
           CAST_USER_ADDR_T(filename), ctx);
    nd.ni_dvp = dvp;
    if (namei(&nd) != 0)
        goto out2;

    xvp = nd.ni_vp;
    nameidone(&nd);
    if (xvp->v_type != VREG)
        goto out1;

    /*
     * When creating a new object and a "._" file already
     * exists, check to see if its a stale "._" file.
     */
    if (!force) {
        struct vnode_attr va;

        VATTR_INIT(&va);
        VATTR_WANTED(&va, va_data_size);
        VATTR_WANTED(&va, va_modify_time);
        if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
            VATTR_IS_SUPPORTED(&va, va_data_size) &&
            VATTR_IS_SUPPORTED(&va, va_modify_time) &&
            va.va_data_size != 0) {
            struct timeval tv;

            microtime(&tv);
            if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
                (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
                force = 1;  /* must be stale */
            }
        }
    }
    if (force) {
        int error;

        error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
        if (error == 0)
            vnode_setneedinactive(xvp);

        post_event_if_success(xvp, error, NOTE_DELETE);
        post_event_if_success(dvp, error, NOTE_WRITE);
    }

out1:
    vnode_put(dvp);
    vnode_put(xvp);
out2:
    if (filename && filename != &smallname[0]) {
        FREE(filename, M_TEMP);
    }
}
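
/*
 * Illustrative sketch (not part of the original file): the staleness
 * rule above in isolation.  A non-empty "._" file whose modification
 * time is more than AD_STALE_SECS (180s) in the past is treated as an
 * orphan and removed even when the caller did not force removal.
 * 'example_is_stale' is a hypothetical name.
 */
#if 0   /* example only; never compiled */
static int
example_is_stale(const struct timeval *now, const struct vnode_attr *va)
{
    return ((now->tv_sec > va->va_modify_time.tv_sec) &&
            (now->tv_sec - va->va_modify_time.tv_sec) > AD_STALE_SECS &&
            va->va_data_size != 0);
}
#endif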
/*
 * Shadow uid/gid/mod to a ._ AppleDouble file
 */
static void
xattrfile_setattr(vnode_t dvp, const char *basename, struct vnode_attr *vap,
                  vfs_context_t ctx)
{
    vnode_t xvp;
    struct nameidata nd;
    char smallname[64];
    char *filename = NULL;
    size_t len;

    if ((dvp == NULLVP) ||
        (basename == NULL) || (basename[0] == '\0') ||
        (basename[0] == '.' && basename[1] == '_')) {
        return;
    }
    filename = &smallname[0];
    len = snprintf(filename, sizeof(smallname), "._%s", basename);
    if (len >= sizeof(smallname)) {
        len++;  /* snprintf result doesn't include '\0' */
        MALLOC(filename, char *, len, M_TEMP, M_WAITOK);
        len = snprintf(filename, len, "._%s", basename);
    }
    NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
           CAST_USER_ADDR_T(filename), ctx);
    nd.ni_dvp = dvp;
    if (namei(&nd) != 0)
        goto out2;

    xvp = nd.ni_vp;
    nameidone(&nd);

    if (xvp->v_type == VREG) {
#ifndef __LP64__
        int thread_safe = THREAD_SAFE_FS(dvp);
#endif /* __LP64__ */
        struct vnop_setattr_args a;

        a.a_desc = &vnop_setattr_desc;
        a.a_vp = xvp;
        a.a_vap = vap;
        a.a_context = ctx;

#ifndef __LP64__
        if (!thread_safe) {
            if ( (lock_fsnode(xvp, NULL)) )
                goto out1;
        }
#endif /* __LP64__ */

        (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);

#ifndef __LP64__
        if (!thread_safe) {
            unlock_fsnode(xvp, NULL);
        }
#endif /* __LP64__ */
    }

#ifndef __LP64__
out1:
#endif /* __LP64__ */
    vnode_put(xvp);
out2:
    if (filename && filename != &smallname[0]) {
        FREE(filename, M_TEMP);
    }
}
#if 0
/*
 *#
 *#% symlink      dvp     L U U
 *#% symlink      vpp     - U -
 *#
 */
struct vnop_symlink_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_dvp;
    vnode_t *a_vpp;
    struct componentname *a_cnp;
    struct vnode_attr *a_vap;
    char *a_target;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
             struct vnode_attr *vap, char *target, vfs_context_t ctx)
{
    int _err;
    struct vnop_symlink_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_symlink_desc;
    a.a_dvp = dvp;
    a.a_vpp = vpp;
    a.a_cnp = cnp;
    a.a_vap = vap;
    a.a_target = target;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(dvp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
    if (_err == 0 && !NATIVE_XATTR(dvp)) {
        /*
         * Remove stale Apple Double file (if any).  Posts its own knotes
         */
        xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
    }

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(dvp, &funnel_state);
    }
#endif /* __LP64__ */

    post_event_if_success(dvp, _err, NOTE_WRITE);

    return (_err);
}
#if 0
/*
 *#
 *#% readdir      vp      L L L
 *#
 */
struct vnop_readdir_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    struct uio *a_uio;
    int a_flags;
    int *a_eofflag;
    int *a_numdirent;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
             int *numdirent, vfs_context_t ctx)
{
    int _err;
    struct vnop_readdir_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_readdir_desc;
    a.a_vp = vp;
    a.a_uio = uio;
    a.a_flags = flags;
    a.a_eofflag = eofflag;
    a.a_numdirent = numdirent;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *#% readdirattr  vp      L L L
 *#
 */
struct vnop_readdirattr_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    struct attrlist *a_alist;
    struct uio *a_uio;
    uint32_t a_maxcount;
    uint32_t a_options;
    uint32_t *a_newstate;
    int *a_eofflag;
    uint32_t *a_actualcount;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
                 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
{
    int _err;
    struct vnop_readdirattr_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_readdirattr_desc;
    a.a_vp = vp;
    a.a_alist = alist;
    a.a_uio = uio;
    a.a_maxcount = maxcount;
    a.a_options = options;
    a.a_newstate = newstate;
    a.a_eofflag = eofflag;
    a.a_actualcount = actualcount;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *#% readlink     vp      L L L
 *#
 */
struct vnop_readlink_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    struct uio *a_uio;
    vfs_context_t a_context;
};
#endif /* 0 */

/*
 * Returns:     0                       Success
 *      lock_fsnode:ENOENT              No such file or directory [only for VFS
 *                                       that is not thread safe & vnode is
 *                                       currently being/has been terminated]
 *      <vfs_readlink>:EINVAL
 *      <vfs_readlink>:???
 *
 * Note:        The return codes from the underlying VFS's readlink routine
 *              can't be fully enumerated here, since third party VFS authors
 *              may not limit their error returns to the ones documented here,
 *              even though this may result in some programs functioning
 *              incorrectly.
 *
 *              The return codes documented above are those which may currently
 *              be returned by HFS from hfs_vnop_readlink, not including
 *              additional error codes which may be propagated from underlying
 *              routines.
 */
errno_t
VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
{
    int _err;
    struct vnop_readlink_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_readlink_desc;
    a.a_vp = vp;
    a.a_uio = uio;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
#if 0
/*
 *#
 *#% inactive     vp      L U U
 *#
 */
struct vnop_inactive_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
{
    int _err;
    struct vnop_inactive_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_inactive_desc;
    a.a_vp = vp;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

#if NAMEDSTREAMS
    /* For file systems that do not support namedstream natively, mark
     * the shadow stream file vnode to be recycled as soon as the last
     * reference goes away.  To avoid re-entering reclaim code, do not
     * call recycle on terminating namedstream vnodes.
     */
    if (vnode_isnamedstream(vp) &&
        (vp->v_parent != NULLVP) &&
        vnode_isshadow(vp) &&
        ((vp->v_lflag & VL_TERMINATE) == 0)) {
        vnode_recycle(vp);
    }
#endif

    return (_err);
}
#if 0
/*
 *#
 *#% reclaim      vp      U U U
 *#
 */
struct vnop_reclaim_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
{
    int _err;
    struct vnop_reclaim_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_reclaim_desc;
    a.a_vp = vp;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        (void) thread_funnel_set(kernel_flock, funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
/*
 * Returns:     0                       Success
 *      lock_fsnode:ENOENT              No such file or directory [only for VFS
 *                                       that is not thread safe & vnode is
 *                                       currently being/has been terminated]
 *      <vnop_pathconf_desc>:???        [per FS implementation specific]
 */
#if 0
/*
 *#
 *#% pathconf     vp      L L L
 *#
 */
struct vnop_pathconf_args {
    struct vnodeop_desc *a_desc;
    vnode_t a_vp;
    int a_name;
    int32_t *a_retval;
    vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
{
    int _err;
    struct vnop_pathconf_args a;
#ifndef __LP64__
    int thread_safe;
    int funnel_state = 0;
#endif /* __LP64__ */

    a.a_desc = &vnop_pathconf_desc;
    a.a_vp = vp;
    a.a_name = name;
    a.a_retval = retval;
    a.a_context = ctx;

#ifndef __LP64__
    thread_safe = THREAD_SAFE_FS(vp);
    if (!thread_safe) {
        if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
            return (_err);
        }
    }
#endif /* __LP64__ */

    _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);

#ifndef __LP64__
    if (!thread_safe) {
        unlock_fsnode(vp, &funnel_state);
    }
#endif /* __LP64__ */

    return (_err);
}
/*
 * Returns:	0			Success
 *	err_advlock:ENOTSUP
 *	lf_advlock:???
 *	<vnop_advlock_desc>:???
 *
 * Notes:	VFS implementations that route advisory locking through
 *		<vnop_advlock_desc>, because lock enforcement does not occur
 *		locally, should try to limit themselves to the return codes
 *		documented above for lf_advlock and err_advlock.
 */
#if 0
/*
 *#% advlock	vp	U U U
 */
struct vnop_advlock_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	caddr_t a_id;
	int a_op;
	struct flock *a_fl;
	int a_flags;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_advlock_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_advlock_desc;
	a.a_vp = vp;
	a.a_id = id;
	a.a_op = op;
	a.a_fl = fl;
	a.a_flags = flags;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	/* Disallow advisory locking on non-seekable vnodes */
	if (vnode_isfifo(vp)) {
		_err = err_advlock(&a);
	} else {
		if ((vp->v_flag & VLOCKLOCAL)) {
			/* Advisory locking done at this layer */
			_err = lf_advlock(&a);
		} else {
			/* Advisory locking done by underlying filesystem */
			_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
		}
	}

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
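
/*
 * Note on the routing above: whether lf_advlock() or the filesystem sees an
 * advisory lock request is controlled by VLOCKLOCAL.  A filesystem that
 * wants the VFS layer to enforce locks on its behalf typically calls
 * vfs_setlocklocal() at mount time.  Sketch (hypothetical mount routine,
 * assuming vfs_setlocklocal() marks the mount's vnodes VLOCKLOCAL):
 */
#if 0
static int
myfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
	/* ... read superblock, set up private mount state ... */

	/* have lf_advlock() handle advisory locks locally */
	vfs_setlocklocal(mp);
	return 0;
}
#endif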
#if 0
/*
 *#% allocate	vp	L L L
 */
struct vnop_allocate_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	off_t a_length;
	u_int32_t a_flags;
	off_t *a_bytesallocated;
	off_t a_offset;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
{
	int _err;
	struct vnop_allocate_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_allocate_desc;
	a.a_vp = vp;
	a.a_length = length;
	a.a_flags = flags;
	a.a_bytesallocated = bytesallocated;
	a.a_offset = offset;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
#if CONFIG_FSE
	if (_err == 0) {
		add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
	}
#endif

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
#if 0
struct vnop_pagein_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	upl_t a_pl;
	upl_offset_t a_pl_offset;
	off_t a_f_offset;
	size_t a_size;
	int a_flags;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_pagein_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_pagein_desc;
	a.a_vp = vp;
	a.a_pl = pl;
	a.a_pl_offset = pl_offset;
	a.a_f_offset = f_offset;
	a.a_size = size;
	a.a_flags = flags;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
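
/*
 * Illustrative sketch (hypothetical): filesystems built on the common
 * cluster layer usually implement vnop_pagein (and, symmetrically,
 * vnop_pageout) as a thin shim over cluster_pagein(), passing the UPL
 * through along with the current EOF.  "myfs_filesize()" is an invented
 * helper.
 */
#if 0
static int
myfs_vnop_pagein(struct vnop_pagein_args *ap)
{
	off_t filesize = myfs_filesize(ap->a_vp);	/* current EOF */

	return cluster_pagein(ap->a_vp, ap->a_pl, ap->a_pl_offset,
	    ap->a_f_offset, (int)ap->a_size, filesize, ap->a_flags);
}
#endif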
#if 0
/*
 *#% pageout	vp	= = =
 */
struct vnop_pageout_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	upl_t a_pl;
	upl_offset_t a_pl_offset;
	off_t a_f_offset;
	size_t a_size;
	int a_flags;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_pageout_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_pageout_desc;
	a.a_vp = vp;
	a.a_pl = pl;
	a.a_pl_offset = pl_offset;
	a.a_f_offset = f_offset;
	a.a_size = size;
	a.a_flags = flags;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	post_event_if_success(vp, _err, NOTE_WRITE);

	return (_err);
}
errno_t
vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
{
	if (vnode_compound_remove_available(dvp)) {
		return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
	} else {
		return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
	}
}
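
/*
 * Note: vnode_compound_remove_available() reports whether the filesystem
 * backing dvp registered a compound-remove entry point, which folds the
 * final name lookup and the remove into one call instead of a separate
 * VNOP_LOOKUP + VNOP_REMOVE.  Sketch (hypothetical; the function names and
 * the VOPFUNC cast are invented for illustration) of a vnodeop table that
 * advertises it:
 */
#if 0
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vnop_lookup_desc, (VOPFUNC)myfs_vnop_lookup },
	{ &vnop_remove_desc, (VOPFUNC)myfs_vnop_remove },
	{ &vnop_compound_remove_desc, (VOPFUNC)myfs_vnop_compound_remove },
	{ NULL, NULL }
};
#endif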
#if 0
/*
 *#% searchfs	vp	L L L
 */
struct vnop_searchfs_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	void *a_searchparams1;
	void *a_searchparams2;
	struct attrlist *a_searchattrs;
	uint32_t a_maxmatches;
	struct timeval *a_timelimit;
	struct attrlist *a_returnattrs;
	uint32_t *a_nummatches;
	uint32_t a_scriptcode;
	uint32_t a_options;
	struct uio *a_uio;
	struct searchstate *a_searchstate;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs,
	uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches,
	uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
{
	int _err;
	struct vnop_searchfs_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_searchfs_desc;
	a.a_vp = vp;
	a.a_searchparams1 = searchparams1;
	a.a_searchparams2 = searchparams2;
	a.a_searchattrs = searchattrs;
	a.a_maxmatches = maxmatches;
	a.a_timelimit = timelimit;
	a.a_returnattrs = returnattrs;
	a.a_nummatches = nummatches;
	a.a_scriptcode = scriptcode;
	a.a_options = options;
	a.a_uio = uio;
	a.a_searchstate = searchstate;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
#if 0
/*
 *#% copyfile	fvp	U U U
 *#% copyfile	tdvp	L U U
 *#% copyfile	tvp	X U U
 */
struct vnop_copyfile_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_fvp;
	vnode_t a_tdvp;
	vnode_t a_tvp;
	struct componentname *a_tcnp;
	int a_mode;
	int a_flags;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
	int mode, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_copyfile_args a;

	a.a_desc = &vnop_copyfile_desc;
	a.a_fvp = fvp;
	a.a_tdvp = tdvp;
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	a.a_mode = mode;
	a.a_flags = flags;
	a.a_context = ctx;
	_err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
	return (_err);
}
errno_t
VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
{
	struct vnop_getxattr_args a;
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_getxattr_desc;
	a.a_vp = vp;
	a.a_name = name;
	a.a_uio = uio;
	a.a_size = size;
	a.a_options = options;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
#endif /* __LP64__ */

	error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
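
/*
 * Illustrative sketch (hypothetical): reading an extended attribute from
 * kernel code through the wrapper above.  A kernel-space uio is built with
 * uio_create()/uio_addiov() and handed to VNOP_GETXATTR; the in-tree
 * vn_getxattr() in vfs_xattr.c does this with additional checking.
 */
#if 0
static int
myfs_read_xattr(vnode_t vp, void *buf, size_t buflen, vfs_context_t ctx)
{
	uio_t auio;
	int error;

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL)
		return ENOMEM;
	uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen);

	error = VNOP_GETXATTR(vp, "com.example.attr", auio, NULL, 0, ctx);

	uio_free(auio);
	return error;
}
#endif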
errno_t
VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
{
	struct vnop_setxattr_args a;
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_setxattr_desc;
	a.a_vp = vp;
	a.a_name = name;
	a.a_uio = uio;
	a.a_options = options;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
#endif /* __LP64__ */

	error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	if (error == 0)
		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);

	post_event_if_success(vp, error, NOTE_ATTRIB);

	return (error);
}
errno_t
VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
{
	struct vnop_removexattr_args a;
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_removexattr_desc;
	a.a_vp = vp;
	a.a_name = name;
	a.a_options = options;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
#endif /* __LP64__ */

	error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	post_event_if_success(vp, error, NOTE_ATTRIB);

	return (error);
}
errno_t
VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
{
	struct vnop_listxattr_args a;
	int error;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_listxattr_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_size = size;
	a.a_options = options;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (error = lock_fsnode(vp, &funnel_state)) ) {
			return (error);
		}
	}
#endif /* __LP64__ */

	error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (error);
}
#if 0
/*
 *#% blktooff	vp	= = =
 */
struct vnop_blktooff_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	daddr64_t a_lblkno;
	off_t *a_offset;
};
#endif /* 0 */
errno_t
VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
{
	int _err;
	struct vnop_blktooff_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_blktooff_desc;
	a.a_vp = vp;
	a.a_lblkno = lblkno;
	a.a_offset = offset;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
#if 0
/*
 *#% offtoblk	vp	= = =
 */
struct vnop_offtoblk_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	off_t a_offset;
	daddr64_t *a_lblkno;
};
#endif /* 0 */
errno_t
VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
{
	int _err;
	struct vnop_offtoblk_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = &vnop_offtoblk_desc;
	a.a_vp = vp;
	a.a_offset = offset;
	a.a_lblkno = lblkno;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
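
/*
 * Illustrative sketch (hypothetical): for most disk filesystems blktooff
 * and offtoblk are pure arithmetic between logical block numbers and byte
 * offsets.  MYFS_BLKSIZE is an invented constant.
 */
#if 0
#define MYFS_BLKSIZE	4096

static int
myfs_vnop_blktooff(struct vnop_blktooff_args *ap)
{
	*ap->a_offset = (off_t)ap->a_lblkno * MYFS_BLKSIZE;
	return 0;
}

static int
myfs_vnop_offtoblk(struct vnop_offtoblk_args *ap)
{
	*ap->a_lblkno = (daddr64_t)(ap->a_offset / MYFS_BLKSIZE);
	return 0;
}
#endif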
#if 0
/*
 *#% blockmap	vp	L L L
 */
struct vnop_blockmap_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
	off_t a_foffset;
	size_t a_size;
	daddr64_t *a_bpn;
	size_t *a_run;
	void *a_poff;
	int a_flags;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_blockmap_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}
	a.a_desc = &vnop_blockmap_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_size = size;
	a.a_bpn = bpn;
	a.a_run = run;
	a.a_poff = poff;
	a.a_flags = flags;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		funnel_state = thread_funnel_set(kernel_flock, TRUE);
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		(void) thread_funnel_set(kernel_flock, funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
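
/*
 * Illustrative sketch (hypothetical): translating a file offset to an
 * on-disk block with the wrapper above.  Passing a NULL context is safe
 * because the wrapper substitutes vfs_context_current().
 */
#if 0
static int
myfs_map_offset(vnode_t vp, off_t f_offset)
{
	daddr64_t blkno;
	size_t run;
	int error;

	/* where does f_offset live, and how many contiguous bytes follow? */
	error = VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &run,
	    NULL, 0, NULL);
	if (error == 0)
		printf("offset %lld -> block %lld (%lu byte run)\n",
		    f_offset, blkno, (unsigned long)run);
	return error;
}
#endif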
#if 0
struct vnop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct buf *a_bp;
};
#endif /* 0 */
errno_t
VNOP_STRATEGY(struct buf *bp)
{
	int _err;
	struct vnop_strategy_args a;

	a.a_desc = &vnop_strategy_desc;
	a.a_bp = bp;
	_err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
	return (_err);
}
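
/*
 * Note: VNOP_STRATEGY is rarely issued directly; buffer cache primitives
 * such as buf_bread() call it when a block is not resident.  Sketch
 * (hypothetical) of reading one logical block, which ultimately lands in
 * the filesystem's strategy routine:
 */
#if 0
static int
myfs_read_block(vnode_t vp, daddr64_t lblkno, int blksize)
{
	buf_t bp;
	int error;

	/* buf_bread() fills the buffer via VNOP_STRATEGY if not cached */
	error = buf_bread(vp, lblkno, blksize, NOCRED, &bp);
	if (error == 0) {
		/* ... consume buf_dataptr(bp) ... */
		buf_brelse(bp);
	}
	return error;
}
#endif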
#if 0
struct vnop_bwrite_args {
	struct vnodeop_desc *a_desc;
	buf_t a_bp;
};
#endif /* 0 */
errno_t
VNOP_BWRITE(struct buf *bp)
{
	int _err;
	struct vnop_bwrite_args a;

	a.a_desc = &vnop_bwrite_desc;
	a.a_bp = bp;
	_err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
	return (_err);
}
#if 0
struct vnop_kqfilt_add_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct knote *a_kn;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
{
	int _err;
	struct vnop_kqfilt_add_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = VDESC(vnop_kqfilt_add);
	a.a_vp = vp;
	a.a_kn = kn;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
#if 0
struct vnop_kqfilt_remove_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	uintptr_t a_ident;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
{
	int _err;
	struct vnop_kqfilt_remove_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = VDESC(vnop_kqfilt_remove);
	a.a_vp = vp;
	a.a_ident = ident;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
errno_t
VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
{
	int _err;
	struct vnop_monitor_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = VDESC(vnop_monitor);
	a.a_vp = vp;
	a.a_events = events;
	a.a_flags = flags;
	a.a_handle = handle;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
#if 0
struct vnop_setlabel_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct label *a_vl;
	vfs_context_t a_context;
};
#endif /* 0 */
errno_t
VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
{
	int _err;
	struct vnop_setlabel_args a;
#ifndef __LP64__
	int thread_safe;
	int funnel_state = 0;
#endif /* __LP64__ */

	a.a_desc = VDESC(vnop_setlabel);
	a.a_vp = vp;
	a.a_vl = label;
	a.a_context = ctx;

#ifndef __LP64__
	thread_safe = THREAD_SAFE_FS(vp);
	if (!thread_safe) {
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
#endif /* __LP64__ */

	_err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);

#ifndef __LP64__
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
#endif /* __LP64__ */

	return (_err);
}
/*
 * Get a named stream
 */
errno_t
VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
{
	struct vnop_getnamedstream_args a;

#ifndef __LP64__
	if (!THREAD_SAFE_FS(vp))
		return (ENOTSUP);
#endif /* __LP64__ */

	a.a_desc = &vnop_getnamedstream_desc;
	a.a_vp = vp;
	a.a_svpp = svpp;
	a.a_name = name;
	a.a_operation = operation;
	a.a_flags = flags;
	a.a_context = ctx;

	return (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
}
/*
 * Create a named stream
 */
errno_t
VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
{
	struct vnop_makenamedstream_args a;

#ifndef __LP64__
	if (!THREAD_SAFE_FS(vp))
		return (ENOTSUP);
#endif /* __LP64__ */

	a.a_desc = &vnop_makenamedstream_desc;
	a.a_vp = vp;
	a.a_svpp = svpp;
	a.a_name = name;
	a.a_flags = flags;
	a.a_context = ctx;

	return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
}
/*
 * Remove a named stream
 */
errno_t
VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
{
	struct vnop_removenamedstream_args a;

#ifndef __LP64__
	if (!THREAD_SAFE_FS(vp))
		return (ENOTSUP);
#endif /* __LP64__ */

	a.a_desc = &vnop_removenamedstream_desc;
	a.a_vp = vp;
	a.a_svp = svp;
	a.a_name = name;
	a.a_flags = flags;
	a.a_context = ctx;

	return (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
}