2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
76 * External virtual filesystem routines
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
87 #include <sys/vnode_internal.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
96 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
111 #include <libkern/OSByteOrder.h>
113 #include <miscfs/specfs/specdev.h>
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
120 #include <security/mac_framework.h>
#if !defined(__LP64__)
/* Nonzero if the vnode's filesystem is thread-safe (no funnel/fsnode lock needed). */
#define THREAD_SAFE_FS(VP)  \
	((VP)->v_unsafefs ? 0 : 1)
#endif /* __LP64__ */
135 #define NATIVE_XATTR(VP) \
136 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
138 static void xattrfile_remove(vnode_t dvp
, const char *basename
,
139 vfs_context_t ctx
, int force
);
140 static void xattrfile_setattr(vnode_t dvp
, const char * basename
,
141 struct vnode_attr
* vap
, vfs_context_t ctx
);
144 * vnode_setneedinactive
146 * Description: Indicate that when the last iocount on this vnode goes away,
147 * and the usecount is also zero, we should inform the filesystem
150 * Parameters: vnode_t vnode to mark
154 * Notes: Notably used when we're deleting a file--we need not have a
155 * usecount, so VNOP_INACTIVE may not get called by anyone. We
156 * want it called when we drop our iocount.
159 vnode_setneedinactive(vnode_t vp
)
164 vp
->v_lflag
|= VL_NEEDINACTIVE
;
#if !defined(__LP64__)
/*
 * lock_fsnode
 *
 * Take the funnel (recording the previous funnel state in *funnel_state)
 * and then the per-vnode fsnode lock for a non-thread-safe filesystem.
 * The fsnode lock is recursive for the owning thread via fsnode_count.
 *
 * Returns:	0	Success
 *		ENOENT	vnode is being terminated / is dead
 */
static int
lock_fsnode(vnode_t vp, int *funnel_state)
{
	if (funnel_state)
		*funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (vp->v_unsafefs) {
		if (vp->v_unsafefs->fsnodeowner == current_thread()) {
			/* already owned by this thread: just bump the recursion count */
			vp->v_unsafefs->fsnode_count++;
		} else {
			lck_mtx_lock(&vp->v_unsafefs->fsnodelock);

			if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
				/* vnode is going away; back out the funnel and fail */
				lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);

				if (funnel_state)
					(void) thread_funnel_set(kernel_flock, *funnel_state);
				return (ENOENT);
			}
			vp->v_unsafefs->fsnodeowner = current_thread();
			vp->v_unsafefs->fsnode_count = 1;
		}
	}
	return (0);
}

/*
 * unlock_fsnode
 *
 * Drop one recursion level of the fsnode lock; release it when the count
 * hits zero, then restore the funnel to the state saved by lock_fsnode.
 */
static void
unlock_fsnode(vnode_t vp, int *funnel_state)
{
	if (vp->v_unsafefs) {
		if (--vp->v_unsafefs->fsnode_count == 0) {
			vp->v_unsafefs->fsnodeowner = NULL;
			lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
		}
	}
	if (funnel_state)
		(void) thread_funnel_set(kernel_flock, *funnel_state);
}
#endif /* __LP64__ */
213 /* ====================================================================== */
214 /* ************ EXTERNAL KERNEL APIS ********************************** */
215 /* ====================================================================== */
218 * implementations of exported VFS operations
221 VFS_MOUNT(mount_t mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t ctx
)
226 int funnel_state
= 0;
227 #endif /* __LP64__ */
229 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_mount
== 0))
233 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
235 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
237 #endif /* __LP64__ */
239 if (vfs_context_is64bit(ctx
)) {
240 if (vfs_64bitready(mp
)) {
241 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
248 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
253 (void) thread_funnel_set(kernel_flock
, funnel_state
);
255 #endif /* __LP64__ */
261 VFS_START(mount_t mp
, int flags
, vfs_context_t ctx
)
266 int funnel_state
= 0;
267 #endif /* __LP64__ */
269 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_start
== 0))
273 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
276 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
278 #endif /* __LP64__ */
280 error
= (*mp
->mnt_op
->vfs_start
)(mp
, flags
, ctx
);
284 (void) thread_funnel_set(kernel_flock
, funnel_state
);
286 #endif /* __LP64__ */
292 VFS_UNMOUNT(mount_t mp
, int flags
, vfs_context_t ctx
)
297 int funnel_state
= 0;
298 #endif /* __LP64__ */
300 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_unmount
== 0))
304 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
307 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
309 #endif /* __LP64__ */
311 error
= (*mp
->mnt_op
->vfs_unmount
)(mp
, flags
, ctx
);
315 (void) thread_funnel_set(kernel_flock
, funnel_state
);
317 #endif /* __LP64__ */
324 * ENOTSUP Not supported
328 * Note: The return codes from the underlying VFS's root routine can't
329 * be fully enumerated here, since third party VFS authors may not
330 * limit their error returns to the ones documented here, even
331 * though this may result in some programs functioning incorrectly.
333 * The return codes documented above are those which may currently
334 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
335 * for a call to hfs_vget on the volume mount poit, not including
336 * additional error codes which may be propagated from underlying
337 * routines called by hfs_vget.
340 VFS_ROOT(mount_t mp
, struct vnode
** vpp
, vfs_context_t ctx
)
345 int funnel_state
= 0;
346 #endif /* __LP64__ */
348 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_root
== 0))
352 ctx
= vfs_context_current();
356 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
358 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
360 #endif /* __LP64__ */
362 error
= (*mp
->mnt_op
->vfs_root
)(mp
, vpp
, ctx
);
366 (void) thread_funnel_set(kernel_flock
, funnel_state
);
368 #endif /* __LP64__ */
374 VFS_QUOTACTL(mount_t mp
, int cmd
, uid_t uid
, caddr_t datap
, vfs_context_t ctx
)
379 int funnel_state
= 0;
380 #endif /* __LP64__ */
382 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_quotactl
== 0))
386 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
388 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
390 #endif /* __LP64__ */
392 error
= (*mp
->mnt_op
->vfs_quotactl
)(mp
, cmd
, uid
, datap
, ctx
);
396 (void) thread_funnel_set(kernel_flock
, funnel_state
);
398 #endif /* __LP64__ */
404 VFS_GETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
409 int funnel_state
= 0;
410 #endif /* __LP64__ */
412 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_getattr
== 0))
416 ctx
= vfs_context_current();
420 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
422 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
424 #endif /* __LP64__ */
426 error
= (*mp
->mnt_op
->vfs_getattr
)(mp
, vfa
, ctx
);
430 (void) thread_funnel_set(kernel_flock
, funnel_state
);
432 #endif /* __LP64__ */
438 VFS_SETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
443 int funnel_state
= 0;
444 #endif /* __LP64__ */
446 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_setattr
== 0))
450 ctx
= vfs_context_current();
454 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
456 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
458 #endif /* __LP64__ */
460 error
= (*mp
->mnt_op
->vfs_setattr
)(mp
, vfa
, ctx
);
464 (void) thread_funnel_set(kernel_flock
, funnel_state
);
466 #endif /* __LP64__ */
472 VFS_SYNC(mount_t mp
, int flags
, vfs_context_t ctx
)
477 int funnel_state
= 0;
478 #endif /* __LP64__ */
480 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_sync
== 0))
484 ctx
= vfs_context_current();
488 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
490 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
492 #endif /* __LP64__ */
494 error
= (*mp
->mnt_op
->vfs_sync
)(mp
, flags
, ctx
);
498 (void) thread_funnel_set(kernel_flock
, funnel_state
);
500 #endif /* __LP64__ */
506 VFS_VGET(mount_t mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t ctx
)
511 int funnel_state
= 0;
512 #endif /* __LP64__ */
514 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_vget
== 0))
518 ctx
= vfs_context_current();
522 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
524 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
526 #endif /* __LP64__ */
528 error
= (*mp
->mnt_op
->vfs_vget
)(mp
, ino
, vpp
, ctx
);
532 (void) thread_funnel_set(kernel_flock
, funnel_state
);
534 #endif /* __LP64__ */
540 VFS_FHTOVP(mount_t mp
, int fhlen
, unsigned char * fhp
, vnode_t
* vpp
, vfs_context_t ctx
)
545 int funnel_state
= 0;
546 #endif /* __LP64__ */
548 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_fhtovp
== 0))
552 ctx
= vfs_context_current();
556 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
558 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
560 #endif /* __LP64__ */
562 error
= (*mp
->mnt_op
->vfs_fhtovp
)(mp
, fhlen
, fhp
, vpp
, ctx
);
566 (void) thread_funnel_set(kernel_flock
, funnel_state
);
568 #endif /* __LP64__ */
574 VFS_VPTOFH(struct vnode
* vp
, int *fhlenp
, unsigned char * fhp
, vfs_context_t ctx
)
579 int funnel_state
= 0;
580 #endif /* __LP64__ */
582 if ((vp
->v_mount
== dead_mountp
) || (vp
->v_mount
->mnt_op
->vfs_vptofh
== 0))
586 ctx
= vfs_context_current();
590 thread_safe
= THREAD_SAFE_FS(vp
);
592 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
594 #endif /* __LP64__ */
596 error
= (*vp
->v_mount
->mnt_op
->vfs_vptofh
)(vp
, fhlenp
, fhp
, ctx
);
600 (void) thread_funnel_set(kernel_flock
, funnel_state
);
602 #endif /* __LP64__ */
608 /* returns a copy of vfs type name for the mount_t */
610 vfs_name(mount_t mp
, char * buffer
)
612 strncpy(buffer
, mp
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
615 /* returns vfs type number for the mount_t */
617 vfs_typenum(mount_t mp
)
619 return(mp
->mnt_vtable
->vfc_typenum
);
622 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
624 vfs_mntlabel(mount_t mp
)
626 return (void*)mp
->mnt_mntlabel
;
629 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
631 vfs_flags(mount_t mp
)
633 return((uint64_t)(mp
->mnt_flag
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
)));
636 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
638 vfs_setflags(mount_t mp
, uint64_t flags
)
640 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
643 mp
->mnt_flag
|= lflags
;
647 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
649 vfs_clearflags(mount_t mp
, uint64_t flags
)
651 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
654 mp
->mnt_flag
&= ~lflags
;
658 /* Is the mount_t ronly and upgrade read/write requested? */
660 vfs_iswriteupgrade(mount_t mp
) /* ronly && MNTK_WANTRDWR */
662 return ((mp
->mnt_flag
& MNT_RDONLY
) && (mp
->mnt_kern_flag
& MNTK_WANTRDWR
));
666 /* Is the mount_t mounted ronly */
668 vfs_isrdonly(mount_t mp
)
670 return (mp
->mnt_flag
& MNT_RDONLY
);
673 /* Is the mount_t mounted for filesystem synchronous writes? */
675 vfs_issynchronous(mount_t mp
)
677 return (mp
->mnt_flag
& MNT_SYNCHRONOUS
);
680 /* Is the mount_t mounted read/write? */
682 vfs_isrdwr(mount_t mp
)
684 return ((mp
->mnt_flag
& MNT_RDONLY
) == 0);
688 /* Is mount_t marked for update (ie MNT_UPDATE) */
690 vfs_isupdate(mount_t mp
)
692 return (mp
->mnt_flag
& MNT_UPDATE
);
696 /* Is mount_t marked for reload (ie MNT_RELOAD) */
698 vfs_isreload(mount_t mp
)
700 return ((mp
->mnt_flag
& MNT_UPDATE
) && (mp
->mnt_flag
& MNT_RELOAD
));
703 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
705 vfs_isforce(mount_t mp
)
707 if ((mp
->mnt_lflag
& MNT_LFORCE
) || (mp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
))
714 vfs_isunmount(mount_t mp
)
716 if ((mp
->mnt_lflag
& MNT_LUNMOUNT
)) {
724 vfs_64bitready(mount_t mp
)
726 if ((mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFS64BITREADY
))
734 vfs_authcache_ttl(mount_t mp
)
736 if ( (mp
->mnt_kern_flag
& (MNTK_AUTH_OPAQUE
| MNTK_AUTH_CACHE_TTL
)) )
737 return (mp
->mnt_authcache_ttl
);
739 return (CACHED_RIGHT_INFINITE_TTL
);
743 vfs_setauthcache_ttl(mount_t mp
, int ttl
)
746 mp
->mnt_kern_flag
|= MNTK_AUTH_CACHE_TTL
;
747 mp
->mnt_authcache_ttl
= ttl
;
752 vfs_clearauthcache_ttl(mount_t mp
)
755 mp
->mnt_kern_flag
&= ~MNTK_AUTH_CACHE_TTL
;
757 * back to the default TTL value in case
758 * MNTK_AUTH_OPAQUE is set on this mount
760 mp
->mnt_authcache_ttl
= CACHED_LOOKUP_RIGHT_TTL
;
765 vfs_markdependency(mount_t mp
)
767 proc_t p
= current_proc();
769 mp
->mnt_dependent_process
= p
;
770 mp
->mnt_dependent_pid
= proc_pid(p
);
776 vfs_authopaque(mount_t mp
)
778 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
))
785 vfs_authopaqueaccess(mount_t mp
)
787 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE_ACCESS
))
794 vfs_setauthopaque(mount_t mp
)
797 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE
;
802 vfs_setauthopaqueaccess(mount_t mp
)
805 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE_ACCESS
;
810 vfs_clearauthopaque(mount_t mp
)
813 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE
;
818 vfs_clearauthopaqueaccess(mount_t mp
)
821 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE_ACCESS
;
826 vfs_setextendedsecurity(mount_t mp
)
829 mp
->mnt_kern_flag
|= MNTK_EXTENDED_SECURITY
;
834 vfs_clearextendedsecurity(mount_t mp
)
837 mp
->mnt_kern_flag
&= ~MNTK_EXTENDED_SECURITY
;
842 vfs_extendedsecurity(mount_t mp
)
844 return(mp
->mnt_kern_flag
& MNTK_EXTENDED_SECURITY
);
847 /* returns the max size of short symlink in this mount_t */
849 vfs_maxsymlen(mount_t mp
)
851 return(mp
->mnt_maxsymlinklen
);
854 /* set max size of short symlink on mount_t */
856 vfs_setmaxsymlen(mount_t mp
, uint32_t symlen
)
858 mp
->mnt_maxsymlinklen
= symlen
;
861 /* return a pointer to the RO vfs_statfs associated with mount_t */
863 vfs_statfs(mount_t mp
)
865 return(&mp
->mnt_vfsstat
);
869 vfs_getattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
873 if ((error
= VFS_GETATTR(mp
, vfa
, ctx
)) != 0)
877 * If we have a filesystem create time, use it to default some others.
879 if (VFSATTR_IS_SUPPORTED(vfa
, f_create_time
)) {
880 if (VFSATTR_IS_ACTIVE(vfa
, f_modify_time
) && !VFSATTR_IS_SUPPORTED(vfa
, f_modify_time
))
881 VFSATTR_RETURN(vfa
, f_modify_time
, vfa
->f_create_time
);
888 vfs_setattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
892 if (vfs_isrdonly(mp
))
895 error
= VFS_SETATTR(mp
, vfa
, ctx
);
898 * If we had alternate ways of setting vfs attributes, we'd
905 /* return the private data handle stored in mount_t */
907 vfs_fsprivate(mount_t mp
)
909 return(mp
->mnt_data
);
912 /* set the private data handle in mount_t */
914 vfs_setfsprivate(mount_t mp
, void *mntdata
)
917 mp
->mnt_data
= mntdata
;
923 * return the block size of the underlying
924 * device associated with mount_t
927 vfs_devblocksize(mount_t mp
) {
929 return(mp
->mnt_devblocksize
);
933 * Returns vnode with an iocount that must be released with vnode_put()
936 vfs_vnodecovered(mount_t mp
)
938 vnode_t vp
= mp
->mnt_vnodecovered
;
939 if ((vp
== NULL
) || (vnode_getwithref(vp
) != 0)) {
947 * return the io attributes associated with mount_t
950 vfs_ioattr(mount_t mp
, struct vfsioattr
*ioattrp
)
953 ioattrp
->io_maxreadcnt
= MAXPHYS
;
954 ioattrp
->io_maxwritecnt
= MAXPHYS
;
955 ioattrp
->io_segreadcnt
= 32;
956 ioattrp
->io_segwritecnt
= 32;
957 ioattrp
->io_maxsegreadsize
= MAXPHYS
;
958 ioattrp
->io_maxsegwritesize
= MAXPHYS
;
959 ioattrp
->io_devblocksize
= DEV_BSIZE
;
960 ioattrp
->io_flags
= 0;
962 ioattrp
->io_maxreadcnt
= mp
->mnt_maxreadcnt
;
963 ioattrp
->io_maxwritecnt
= mp
->mnt_maxwritecnt
;
964 ioattrp
->io_segreadcnt
= mp
->mnt_segreadcnt
;
965 ioattrp
->io_segwritecnt
= mp
->mnt_segwritecnt
;
966 ioattrp
->io_maxsegreadsize
= mp
->mnt_maxsegreadsize
;
967 ioattrp
->io_maxsegwritesize
= mp
->mnt_maxsegwritesize
;
968 ioattrp
->io_devblocksize
= mp
->mnt_devblocksize
;
969 ioattrp
->io_flags
= mp
->mnt_ioflags
;
971 ioattrp
->io_reserved
[0] = NULL
;
972 ioattrp
->io_reserved
[1] = NULL
;
977 * set the IO attributes associated with mount_t
980 vfs_setioattr(mount_t mp
, struct vfsioattr
* ioattrp
)
984 mp
->mnt_maxreadcnt
= ioattrp
->io_maxreadcnt
;
985 mp
->mnt_maxwritecnt
= ioattrp
->io_maxwritecnt
;
986 mp
->mnt_segreadcnt
= ioattrp
->io_segreadcnt
;
987 mp
->mnt_segwritecnt
= ioattrp
->io_segwritecnt
;
988 mp
->mnt_maxsegreadsize
= ioattrp
->io_maxsegreadsize
;
989 mp
->mnt_maxsegwritesize
= ioattrp
->io_maxsegwritesize
;
990 mp
->mnt_devblocksize
= ioattrp
->io_devblocksize
;
991 mp
->mnt_ioflags
= ioattrp
->io_flags
;
995 * Add a new filesystem into the kernel specified in passed in
996 * vfstable structure. It fills in the vnode
997 * dispatch vector that is to be passed to when vnodes are created.
998 * It returns a handle which is to be used to when the FS is to be removed
1000 typedef int (*PFI
)(void *);
1001 extern int vfs_opv_numops
;
1003 vfs_fsadd(struct vfs_fsentry
*vfe
, vfstable_t
* handle
)
1005 #pragma unused(data)
1006 struct vfstable
*newvfstbl
= NULL
;
1008 int (***opv_desc_vector_p
)(void *);
1009 int (**opv_desc_vector
)(void *);
1010 struct vnodeopv_entry_desc
*opve_descp
;
1016 * This routine is responsible for all the initialization that would
1017 * ordinarily be done as part of the system startup;
1020 if (vfe
== (struct vfs_fsentry
*)0)
1023 desccount
= vfe
->vfe_vopcnt
;
1024 if ((desccount
<=0) || ((desccount
> 8)) || (vfe
->vfe_vfsops
== (struct vfsops
*)NULL
)
1025 || (vfe
->vfe_opvdescs
== (struct vnodeopv_desc
**)NULL
))
1029 /* Non-threadsafe filesystems are not supported for K64 */
1030 if ((vfe
->vfe_flags
& (VFS_TBLTHREADSAFE
| VFS_TBLFSNODELOCK
)) == 0) {
1033 #endif /* __LP64__ */
1035 MALLOC(newvfstbl
, void *, sizeof(struct vfstable
), M_TEMP
,
1037 bzero(newvfstbl
, sizeof(struct vfstable
));
1038 newvfstbl
->vfc_vfsops
= vfe
->vfe_vfsops
;
1039 strncpy(&newvfstbl
->vfc_name
[0], vfe
->vfe_fsname
, MFSNAMELEN
);
1040 if ((vfe
->vfe_flags
& VFS_TBLNOTYPENUM
))
1041 newvfstbl
->vfc_typenum
= maxvfsconf
++;
1043 newvfstbl
->vfc_typenum
= vfe
->vfe_fstypenum
;
1045 newvfstbl
->vfc_refcount
= 0;
1046 newvfstbl
->vfc_flags
= 0;
1047 newvfstbl
->vfc_mountroot
= NULL
;
1048 newvfstbl
->vfc_next
= NULL
;
1049 newvfstbl
->vfc_vfsflags
= 0;
1050 if (vfe
->vfe_flags
& VFS_TBL64BITREADY
)
1051 newvfstbl
->vfc_vfsflags
|= VFC_VFS64BITREADY
;
1052 if (vfe
->vfe_flags
& VFS_TBLVNOP_PAGEINV2
)
1053 newvfstbl
->vfc_vfsflags
|= VFC_VFSVNOP_PAGEINV2
;
1054 if (vfe
->vfe_flags
& VFS_TBLVNOP_PAGEOUTV2
)
1055 newvfstbl
->vfc_vfsflags
|= VFC_VFSVNOP_PAGEOUTV2
;
1057 if (vfe
->vfe_flags
& VFS_TBLTHREADSAFE
)
1058 newvfstbl
->vfc_vfsflags
|= VFC_VFSTHREADSAFE
;
1059 if (vfe
->vfe_flags
& VFS_TBLFSNODELOCK
)
1060 newvfstbl
->vfc_vfsflags
|= VFC_VFSTHREADSAFE
;
1061 #endif /* __LP64__ */
1062 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) == VFS_TBLLOCALVOL
)
1063 newvfstbl
->vfc_flags
|= MNT_LOCAL
;
1064 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) && (vfe
->vfe_flags
& VFS_TBLGENERICMNTARGS
) == 0)
1065 newvfstbl
->vfc_vfsflags
|= VFC_VFSLOCALARGS
;
1067 newvfstbl
->vfc_vfsflags
|= VFC_VFSGENERICARGS
;
1069 if (vfe
->vfe_flags
& VFS_TBLNATIVEXATTR
)
1070 newvfstbl
->vfc_vfsflags
|= VFC_VFSNATIVEXATTR
;
1071 if (vfe
->vfe_flags
& VFS_TBLUNMOUNT_PREFLIGHT
)
1072 newvfstbl
->vfc_vfsflags
|= VFC_VFSPREFLIGHT
;
1073 if (vfe
->vfe_flags
& VFS_TBLREADDIR_EXTENDED
)
1074 newvfstbl
->vfc_vfsflags
|= VFC_VFSREADDIR_EXTENDED
;
1075 if (vfe
->vfe_flags
& VFS_TBLNOMACLABEL
)
1076 newvfstbl
->vfc_vfsflags
|= VFC_VFSNOMACLABEL
;
1079 * Allocate and init the vectors.
1080 * Also handle backwards compatibility.
1082 * We allocate one large block to hold all <desccount>
1083 * vnode operation vectors stored contiguously.
1085 /* XXX - shouldn't be M_TEMP */
1087 descsize
= desccount
* vfs_opv_numops
* sizeof(PFI
);
1088 MALLOC(descptr
, PFI
*, descsize
,
1090 bzero(descptr
, descsize
);
1092 newvfstbl
->vfc_descptr
= descptr
;
1093 newvfstbl
->vfc_descsize
= descsize
;
1096 for (i
= 0; i
< desccount
; i
++ ) {
1097 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
1099 * Fill in the caller's pointer to the start of the i'th vector.
1100 * They'll need to supply it when calling vnode_create.
1102 opv_desc_vector
= descptr
+ i
* vfs_opv_numops
;
1103 *opv_desc_vector_p
= opv_desc_vector
;
1105 for (j
= 0; vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
1106 opve_descp
= &(vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
]);
1109 * Sanity check: is this operation listed
1110 * in the list of operations? We check this
1111 * by seeing if its offset is zero. Since
1112 * the default routine should always be listed
1113 * first, it should be the only one with a zero
1114 * offset. Any other operation with a zero
1115 * offset is probably not listed in
1116 * vfs_op_descs, and so is probably an error.
1118 * A panic here means the layer programmer
1119 * has committed the all-too common bug
1120 * of adding a new operation to the layer's
1121 * list of vnode operations but
1122 * not adding the operation to the system-wide
1123 * list of supported operations.
1125 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
1126 opve_descp
->opve_op
->vdesc_offset
!= VOFFSET(vnop_default
)) {
1127 printf("vfs_fsadd: operation %s not listed in %s.\n",
1128 opve_descp
->opve_op
->vdesc_name
,
1130 panic("vfs_fsadd: bad operation");
1133 * Fill in this entry.
1135 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
1136 opve_descp
->opve_impl
;
1141 * Finally, go back and replace unfilled routines
1142 * with their default. (Sigh, an O(n^3) algorithm. I
1143 * could make it better, but that'd be work, and n is small.)
1145 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
1148 * Force every operations vector to have a default routine.
1150 opv_desc_vector
= *opv_desc_vector_p
;
1151 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
)
1152 panic("vfs_fsadd: operation vector without default routine.");
1153 for (j
= 0; j
< vfs_opv_numops
; j
++)
1154 if (opv_desc_vector
[j
] == NULL
)
1155 opv_desc_vector
[j
] =
1156 opv_desc_vector
[VOFFSET(vnop_default
)];
1158 } /* end of each vnodeopv_desc parsing */
1162 *handle
= vfstable_add(newvfstbl
);
1164 if (newvfstbl
->vfc_typenum
<= maxvfsconf
)
1165 maxvfsconf
= newvfstbl
->vfc_typenum
+ 1;
1167 if (newvfstbl
->vfc_vfsops
->vfs_init
) {
1168 struct vfsconf vfsc
;
1169 bzero(&vfsc
, sizeof(struct vfsconf
));
1170 vfsc
.vfc_reserved1
= 0;
1171 bcopy((*handle
)->vfc_name
, vfsc
.vfc_name
, sizeof(vfsc
.vfc_name
));
1172 vfsc
.vfc_typenum
= (*handle
)->vfc_typenum
;
1173 vfsc
.vfc_refcount
= (*handle
)->vfc_refcount
;
1174 vfsc
.vfc_flags
= (*handle
)->vfc_flags
;
1175 vfsc
.vfc_reserved2
= 0;
1176 vfsc
.vfc_reserved3
= 0;
1178 (*newvfstbl
->vfc_vfsops
->vfs_init
)(&vfsc
);
1181 FREE(newvfstbl
, M_TEMP
);
1187 * Removes the filesystem from kernel.
1188 * The argument passed in is the handle that was given when
1189 * file system was added
1192 vfs_fsremove(vfstable_t handle
)
1194 struct vfstable
* vfstbl
= (struct vfstable
*)handle
;
1195 void *old_desc
= NULL
;
1198 /* Preflight check for any mounts */
1200 if ( vfstbl
->vfc_refcount
!= 0 ) {
1201 mount_list_unlock();
1206 * save the old descriptor; the free cannot occur unconditionally,
1207 * since vfstable_del() may fail.
1209 if (vfstbl
->vfc_descptr
&& vfstbl
->vfc_descsize
) {
1210 old_desc
= vfstbl
->vfc_descptr
;
1212 err
= vfstable_del(vfstbl
);
1214 mount_list_unlock();
1216 /* free the descriptor if the delete was successful */
1217 if (err
== 0 && old_desc
) {
1218 FREE(old_desc
, M_TEMP
);
1225 vfs_context_pid(vfs_context_t ctx
)
1227 return (proc_pid(vfs_context_proc(ctx
)));
1231 vfs_context_suser(vfs_context_t ctx
)
1233 return (suser(ctx
->vc_ucred
, NULL
));
1237 * Return bit field of signals posted to all threads in the context's process.
1239 * XXX Signals should be tied to threads, not processes, for most uses of this
1243 vfs_context_issignal(vfs_context_t ctx
, sigset_t mask
)
1245 proc_t p
= vfs_context_proc(ctx
);
1247 return(proc_pendingsignals(p
, mask
));
1252 vfs_context_is64bit(vfs_context_t ctx
)
1254 proc_t proc
= vfs_context_proc(ctx
);
1257 return(proc_is64bit(proc
));
1265 * Description: Given a vfs_context_t, return the proc_t associated with it.
1267 * Parameters: vfs_context_t The context to use
1269 * Returns: proc_t The process for this context
1271 * Notes: This function will return the current_proc() if any of the
1272 * following conditions are true:
1274 * o The supplied context pointer is NULL
1275 * o There is no Mach thread associated with the context
1276 * o There is no Mach task associated with the Mach thread
1277 * o There is no proc_t associated with the Mach task
1278 * o The proc_t has no per process open file table
1279 * o The proc_t is post-vfork()
1281 * This causes this function to return a value matching as
1282 * closely as possible the previous behaviour, while at the
1283 * same time avoiding the task lending that results from vfork()
1286 vfs_context_proc(vfs_context_t ctx
)
1290 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1291 proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
);
1292 if (proc
!= NULL
&& (proc
->p_fd
== NULL
|| (proc
->p_lflag
& P_LVFORK
)))
1295 return(proc
== NULL
? current_proc() : proc
);
1299 * vfs_context_get_special_port
1301 * Description: Return the requested special port from the task associated
1302 * with the given context.
1304 * Parameters: vfs_context_t The context to use
1305 * int Index of special port
1306 * ipc_port_t * Pointer to returned port
1308 * Returns: kern_return_t see task_get_special_port()
1311 vfs_context_get_special_port(vfs_context_t ctx
, int which
, ipc_port_t
*portp
)
1315 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1316 task
= get_threadtask(ctx
->vc_thread
);
1318 return task_get_special_port(task
, which
, portp
);
1322 * vfs_context_set_special_port
1324 * Description: Set the requested special port in the task associated
1325 * with the given context.
1327 * Parameters: vfs_context_t The context to use
1328 * int Index of special port
1329 * ipc_port_t New special port
1331 * Returns: kern_return_t see task_set_special_port()
1334 vfs_context_set_special_port(vfs_context_t ctx
, int which
, ipc_port_t port
)
1338 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1339 task
= get_threadtask(ctx
->vc_thread
);
1341 return task_set_special_port(task
, which
, port
);
1345 * vfs_context_thread
1347 * Description: Return the Mach thread associated with a vfs_context_t
1349 * Parameters: vfs_context_t The context to use
1351 * Returns: thread_t The thread for this context, or
1352 * NULL, if there is not one.
1354 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1355 * as a result of a static vfs_context_t declaration in a function
1356 * and will result in this function returning NULL.
1358 * This is intentional; this function should NOT return the
1359 * current_thread() in this case.
1362 vfs_context_thread(vfs_context_t ctx
)
1364 return(ctx
->vc_thread
);
1371 * Description: Returns a reference on the vnode for the current working
1372 * directory for the supplied context
1374 * Parameters: vfs_context_t The context to use
1376 * Returns: vnode_t The current working directory
1379 * Notes: The function first attempts to obtain the current directory
1380 * from the thread, and if it is not present there, falls back
1381 * to obtaining it from the process instead. If it can't be
1382 * obtained from either place, we return NULLVP.
1385 vfs_context_cwd(vfs_context_t ctx
)
1387 vnode_t cwd
= NULLVP
;
1389 if(ctx
!= NULL
&& ctx
->vc_thread
!= NULL
) {
1390 uthread_t uth
= get_bsdthread_info(ctx
->vc_thread
);
1394 * Get the cwd from the thread; if there isn't one, get it
1395 * from the process, instead.
1397 if ((cwd
= uth
->uu_cdir
) == NULLVP
&&
1398 (proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
)) != NULL
&&
1400 cwd
= proc
->p_fd
->fd_cdir
;
1407 * vfs_context_create
1409 * Description: Allocate and initialize a new context.
1411 * Parameters: vfs_context_t: Context to copy, or NULL for new
1413 * Returns: Pointer to new context
1415 * Notes: Copy cred and thread from argument, if available; else
1416 * initialize with current thread and new cred. Returns
1417 * with a reference held on the credential.
1420 vfs_context_create(vfs_context_t ctx
)
1422 vfs_context_t newcontext
;
1424 newcontext
= (vfs_context_t
)kalloc(sizeof(struct vfs_context
));
1427 kauth_cred_t safecred
;
1429 newcontext
->vc_thread
= ctx
->vc_thread
;
1430 safecred
= ctx
->vc_ucred
;
1432 newcontext
->vc_thread
= current_thread();
1433 safecred
= kauth_cred_get();
1435 if (IS_VALID_CRED(safecred
))
1436 kauth_cred_ref(safecred
);
1437 newcontext
->vc_ucred
= safecred
;
1445 vfs_context_current(void)
1447 vfs_context_t ctx
= NULL
;
1448 volatile uthread_t ut
= (uthread_t
)get_bsdthread_info(current_thread());
1451 if (ut
->uu_context
.vc_ucred
!= NULL
) {
1452 ctx
= &ut
->uu_context
;
1456 return(ctx
== NULL
? vfs_context_kernel() : ctx
);
1463 * Dangerous hack - adopt the first kernel thread as the current thread, to
1464 * get to the vfs_context_t in the uthread associated with a kernel thread.
1465 * This is used by UDF to make the call into IOCDMediaBSDClient,
1466 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1467 * ioctl() is being called from kernel or user space (and all this because
1468 * we do not pass threads into our ioctl()'s, instead of processes).
1470 * This is also used by imageboot_setup(), called early from bsd_init() after
1471 * kernproc has been given a credential.
1473 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1474 * of many Mach headers to do the reference directly rather than indirectly;
1475 * we will need to forego this convenience when we reture proc_thread().
1477 static struct vfs_context kerncontext
;
1479 vfs_context_kernel(void)
1481 if (kerncontext
.vc_ucred
== NOCRED
)
1482 kerncontext
.vc_ucred
= kernproc
->p_ucred
;
1483 if (kerncontext
.vc_thread
== NULL
)
1484 kerncontext
.vc_thread
= proc_thread(kernproc
);
1486 return(&kerncontext
);
1491 vfs_context_rele(vfs_context_t ctx
)
1494 if (IS_VALID_CRED(ctx
->vc_ucred
))
1495 kauth_cred_unref(&ctx
->vc_ucred
);
1496 kfree(ctx
, sizeof(struct vfs_context
));
1503 vfs_context_ucred(vfs_context_t ctx
)
1505 return (ctx
->vc_ucred
);
1509 * Return true if the context is owned by the superuser.
1512 vfs_context_issuser(vfs_context_t ctx
)
1514 return(kauth_cred_issuser(vfs_context_ucred(ctx
)));
1518 * Given a context, for all fields of vfs_context_t which
1519 * are not held with a reference, set those fields to the
1520 * values for the current execution context. Currently, this
1521 * just means the vc_thread.
1523 * Returns: 0 for success, nonzero for failure
1525 * The intended use is:
1526 * 1. vfs_context_create() gets the caller a context
1527 * 2. vfs_context_bind() sets the unrefcounted data
1528 * 3. vfs_context_rele() releases the context
1532 vfs_context_bind(vfs_context_t ctx
)
1534 ctx
->vc_thread
= current_thread();
1538 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1542 * Convert between vnode types and inode formats (since POSIX.1
1543 * defines mode word of stat structure in terms of inode formats).
1546 vnode_iftovt(int mode
)
1548 return(iftovt_tab
[((mode
) & S_IFMT
) >> 12]);
1552 vnode_vttoif(enum vtype indx
)
1554 return(vttoif_tab
[(int)(indx
)]);
/* Combine a vnode type and permission bits into a stat-style mode word. */
int
vnode_makeimode(int indx, int mode)
{
	return (int)(VTTOIF(indx) | (mode));
}
1565 * vnode manipulation functions.
1568 /* returns system root vnode iocount; It should be released using vnode_put() */
/* NOTE(review): the declaration and error-branch lines of this function are
 * missing from this extract; the visible fragment takes an iocount on
 * rootvnode and returns a NULL vnode on failure — confirm against full source. */
1574 error
= vnode_get(rootvnode
);
1576 return ((vnode_t
)0);
1583 vnode_vid(vnode_t vp
)
1585 return ((uint32_t)(vp
->v_id
));
1589 vnode_mount(vnode_t vp
)
1591 return (vp
->v_mount
);
1595 vnode_mountedhere(vnode_t vp
)
1599 if ((vp
->v_type
== VDIR
) && ((mp
= vp
->v_mountedhere
) != NULL
) &&
1600 (mp
->mnt_vnodecovered
== vp
))
1603 return (mount_t
)NULL
;
1606 /* returns vnode type of vnode_t */
1608 vnode_vtype(vnode_t vp
)
1610 return (vp
->v_type
);
1613 /* returns FS specific node saved in vnode */
1615 vnode_fsnode(vnode_t vp
)
1617 return (vp
->v_data
);
/* NOTE(review): body missing from this extract; presumably clears the
 * FS-private v_data pointer — confirm against full source. */
1621 vnode_clearfsnode(vnode_t vp
)
/* NOTE(review): body missing from this extract; presumably returns the
 * special device number of a VCHR/VBLK vnode — confirm against full source. */
1627 vnode_specrdev(vnode_t vp
)
1633 /* Accessor functions */
1634 /* is vnode_t a root vnode */
1636 vnode_isvroot(vnode_t vp
)
1638 return ((vp
->v_flag
& VROOT
)? 1 : 0);
1641 /* is vnode_t a system vnode */
1643 vnode_issystem(vnode_t vp
)
1645 return ((vp
->v_flag
& VSYSTEM
)? 1 : 0);
1648 /* is vnode_t a swap file vnode */
1650 vnode_isswap(vnode_t vp
)
1652 return ((vp
->v_flag
& VSWAP
)? 1 : 0);
1655 /* is vnode_t a tty */
1657 vnode_istty(vnode_t vp
)
1659 return ((vp
->v_flag
& VISTTY
) ? 1 : 0);
1662 /* if vnode_t mount operation in progress */
1664 vnode_ismount(vnode_t vp
)
1666 return ((vp
->v_flag
& VMOUNT
)? 1 : 0);
1669 /* is this vnode under recyle now */
1671 vnode_isrecycled(vnode_t vp
)
1675 vnode_lock_spin(vp
);
1676 ret
= (vp
->v_lflag
& (VL_TERMINATE
|VL_DEAD
))? 1 : 0;
1681 /* vnode was created by background task requesting rapid aging
1682 and has not since been referenced by a normal task */
1684 vnode_israge(vnode_t vp
)
1686 return ((vp
->v_flag
& VRAGE
)? 1 : 0);
1689 /* is vnode_t marked to not keep data cached once it's been consumed */
1691 vnode_isnocache(vnode_t vp
)
1693 return ((vp
->v_flag
& VNOCACHE_DATA
)? 1 : 0);
1697 * has sequential readahead been disabled on this vnode
1700 vnode_isnoreadahead(vnode_t vp
)
1702 return ((vp
->v_flag
& VRAOFF
)? 1 : 0);
1706 vnode_is_openevt(vnode_t vp
)
1708 return ((vp
->v_flag
& VOPENEVT
)? 1 : 0);
1711 /* is vnode_t a standard one? */
1713 vnode_isstandard(vnode_t vp
)
1715 return ((vp
->v_flag
& VSTANDARD
)? 1 : 0);
1718 /* don't vflush() if SKIPSYSTEM */
1720 vnode_isnoflush(vnode_t vp
)
1722 return ((vp
->v_flag
& VNOFLUSH
)? 1 : 0);
1725 /* is vnode_t a regular file */
1727 vnode_isreg(vnode_t vp
)
1729 return ((vp
->v_type
== VREG
)? 1 : 0);
1732 /* is vnode_t a directory? */
1734 vnode_isdir(vnode_t vp
)
1736 return ((vp
->v_type
== VDIR
)? 1 : 0);
1739 /* is vnode_t a symbolic link ? */
1741 vnode_islnk(vnode_t vp
)
1743 return ((vp
->v_type
== VLNK
)? 1 : 0);
1746 /* is vnode_t a fifo ? */
1748 vnode_isfifo(vnode_t vp
)
1750 return ((vp
->v_type
== VFIFO
)? 1 : 0);
1753 /* is vnode_t a block device? */
1755 vnode_isblk(vnode_t vp
)
1757 return ((vp
->v_type
== VBLK
)? 1 : 0);
1761 vnode_isspec(vnode_t vp
)
1763 return (((vp
->v_type
== VCHR
) || (vp
->v_type
== VBLK
)) ? 1 : 0);
1766 /* is vnode_t a char device? */
1768 vnode_ischr(vnode_t vp
)
1770 return ((vp
->v_type
== VCHR
)? 1 : 0);
1773 /* is vnode_t a socket? */
1775 vnode_issock(vnode_t vp
)
1777 return ((vp
->v_type
== VSOCK
)? 1 : 0);
1780 /* is vnode_t a device with multiple active vnodes referring to it? */
1782 vnode_isaliased(vnode_t vp
)
1784 enum vtype vt
= vp
->v_type
;
1785 if (!((vt
== VCHR
) || (vt
== VBLK
))) {
1788 return (vp
->v_specflags
& SI_ALIASED
);
1792 /* is vnode_t a named stream? */
/* NOTE(review): the parameter lists, braces and conditional-compilation
 * (NAMEDSTREAMS) branches of these three predicates are missing from this
 * extract; only the flag tests survive — confirm against full source. */
1794 vnode_isnamedstream(
1803 return ((vp
->v_flag
& VISNAMEDSTREAM
) ? 1 : 0);
/* NOTE(review): fragment of a second predicate testing VISSHADOW */
1819 return ((vp
->v_flag
& VISSHADOW
) ? 1 : 0);
1825 /* does vnode have associated named stream vnodes ? */
1827 vnode_hasnamedstreams(
1836 return ((vp
->v_lflag
& VL_HASSTREAMS
) ? 1 : 0);
1841 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1843 vnode_setnocache(vnode_t vp
)
1845 vnode_lock_spin(vp
);
1846 vp
->v_flag
|= VNOCACHE_DATA
;
1851 vnode_clearnocache(vnode_t vp
)
1853 vnode_lock_spin(vp
);
1854 vp
->v_flag
&= ~VNOCACHE_DATA
;
1859 vnode_set_openevt(vnode_t vp
)
1861 vnode_lock_spin(vp
);
1862 vp
->v_flag
|= VOPENEVT
;
1867 vnode_clear_openevt(vnode_t vp
)
1869 vnode_lock_spin(vp
);
1870 vp
->v_flag
&= ~VOPENEVT
;
1876 vnode_setnoreadahead(vnode_t vp
)
1878 vnode_lock_spin(vp
);
1879 vp
->v_flag
|= VRAOFF
;
1884 vnode_clearnoreadahead(vnode_t vp
)
1886 vnode_lock_spin(vp
);
1887 vp
->v_flag
&= ~VRAOFF
;
1892 /* mark vnode_t to skip vflush() is SKIPSYSTEM */
1894 vnode_setnoflush(vnode_t vp
)
1896 vnode_lock_spin(vp
);
1897 vp
->v_flag
|= VNOFLUSH
;
1902 vnode_clearnoflush(vnode_t vp
)
1904 vnode_lock_spin(vp
);
1905 vp
->v_flag
&= ~VNOFLUSH
;
1910 /* is vnode_t a blkdevice and has a FS mounted on it */
1912 vnode_ismountedon(vnode_t vp
)
1914 return ((vp
->v_specflags
& SI_MOUNTEDON
)? 1 : 0);
1918 vnode_setmountedon(vnode_t vp
)
1920 vnode_lock_spin(vp
);
1921 vp
->v_specflags
|= SI_MOUNTEDON
;
1926 vnode_clearmountedon(vnode_t vp
)
1928 vnode_lock_spin(vp
);
1929 vp
->v_specflags
&= ~SI_MOUNTEDON
;
/* NOTE(review): bodies missing from this extract; settag presumably stores
 * 'tag' into the vnode and vnode_tag returns it — confirm against full source. */
1935 vnode_settag(vnode_t vp
, int tag
)
1942 vnode_tag(vnode_t vp
)
1948 vnode_parent(vnode_t vp
)
1951 return(vp
->v_parent
);
/* NOTE(review): bodies missing from this extract for setparent/name/setname;
 * presumably simple assignments/returns of v_parent and v_name — confirm
 * against full source. */
1955 vnode_setparent(vnode_t vp
, vnode_t dvp
)
1961 vnode_name(vnode_t vp
)
1963 /* we try to keep v_name a reasonable name for the node */
1968 vnode_setname(vnode_t vp
, char * name
)
1973 /* return the registered FS name when adding the FS to kernel */
1975 vnode_vfsname(vnode_t vp
, char * buf
)
1977 strncpy(buf
, vp
->v_mount
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
1980 /* return the FS type number */
1982 vnode_vfstypenum(vnode_t vp
)
1984 return(vp
->v_mount
->mnt_vtable
->vfc_typenum
);
1988 vnode_vfs64bitready(vnode_t vp
)
1992 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
1994 if ((vp
->v_mount
!= dead_mountp
) && (vp
->v_mount
->mnt_vtable
->vfc_vfsflags
& VFC_VFS64BITREADY
))
2002 /* return the visible flags on associated mount point of vnode_t */
2004 vnode_vfsvisflags(vnode_t vp
)
2006 return(vp
->v_mount
->mnt_flag
& MNT_VISFLAGMASK
);
2009 /* return the command modifier flags on associated mount point of vnode_t */
2011 vnode_vfscmdflags(vnode_t vp
)
2013 return(vp
->v_mount
->mnt_flag
& MNT_CMDFLAGS
);
2016 /* return the max symlink of short links of vnode_t */
2018 vnode_vfsmaxsymlen(vnode_t vp
)
2020 return(vp
->v_mount
->mnt_maxsymlinklen
);
2023 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2025 vnode_vfsstatfs(vnode_t vp
)
2027 return(&vp
->v_mount
->mnt_vfsstat
);
2030 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2032 vnode_vfsfsprivate(vnode_t vp
)
2034 return(vp
->v_mount
->mnt_data
);
2037 /* is vnode_t in a rdonly mounted FS */
2039 vnode_vfsisrdonly(vnode_t vp
)
2041 return ((vp
->v_mount
->mnt_flag
& MNT_RDONLY
)? 1 : 0);
2046 * Returns vnode ref to current working directory; if a per-thread current
2047 * working directory is in effect, return that instead of the per process one.
2049 * XXX Published, but not used.
2052 current_workingdir(void)
2054 return vfs_context_cwd(vfs_context_current());
2057 /* returns vnode ref to current root(chroot) directory */
2059 current_rootdir(void)
2061 proc_t proc
= current_proc();
2064 if ( (vp
= proc
->p_fd
->fd_rdir
) ) {
2065 if ( (vnode_getwithref(vp
)) )
2072 * Get a filesec and optional acl contents from an extended attribute.
2073 * Function will attempt to retrive ACL, UUID, and GUID information using a
2074 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2076 * Parameters: vp The vnode on which to operate.
2077 * fsecp The filesec (and ACL, if any) being
2079 * ctx The vnode context in which the
2080 * operation is to be attempted.
2082 * Returns: 0 Success
2085 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2086 * host byte order, as will be the ACL contents, if any.
2087 * Internally, we will cannonize these values from network (PPC)
2088 * byte order after we retrieve them so that the on-disk contents
2089 * of the extended attribute are identical for both PPC and Intel
2090 * (if we were not being required to provide this service via
2091 * fallback, this would be the job of the filesystem
2092 * 'VNOP_GETATTR' call).
2094 * We use ntohl() because it has a transitive property on Intel
2095 * machines and no effect on PPC mancines. This guarantees us
2097 * XXX: Deleting rather than ignoreing a corrupt security structure is
2098 * probably the only way to reset it without assistance from an
2099 * file system integrity checking tool. Right now we ignore it.
2101 * XXX: We should enummerate the possible errno values here, and where
2102 * in the code they originated.
2105 vnode_get_filesec(vnode_t vp
, kauth_filesec_t
*fsecp
, vfs_context_t ctx
)
2107 kauth_filesec_t fsec
;
2110 size_t xsize
, rsize
;
2112 uint32_t host_fsec_magic
;
2113 uint32_t host_acl_entrycount
;
2119 /* find out how big the EA is */
2120 if (vn_getxattr(vp
, KAUTH_FILESEC_XATTR
, NULL
, &xsize
, XATTR_NOSECURITY
, ctx
) != 0) {
2121 /* no EA, no filesec */
2122 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
2124 /* either way, we are done */
2129 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2130 * ACE entrly ACL, and if it's larger than that, it must have the right
2131 * number of bytes such that it contains an atomic number of ACEs,
2132 * rather than partial entries. Otherwise, we ignore it.
2134 if (!KAUTH_FILESEC_VALID(xsize
)) {
2135 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize
);
2140 /* how many entries would fit? */
2141 fsec_size
= KAUTH_FILESEC_COUNT(xsize
);
2143 /* get buffer and uio */
2144 if (((fsec
= kauth_filesec_alloc(fsec_size
)) == NULL
) ||
2145 ((fsec_uio
= uio_create(1, 0, UIO_SYSSPACE
, UIO_READ
)) == NULL
) ||
2146 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), xsize
)) {
2147 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2152 /* read security attribute */
2154 if ((error
= vn_getxattr(vp
,
2155 KAUTH_FILESEC_XATTR
,
2161 /* no attribute - no security data */
2162 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
2164 /* either way, we are done */
2169 * Validate security structure; the validation must take place in host
2170 * byte order. If it's corrupt, we will just ignore it.
2173 /* Validate the size before trying to convert it */
2174 if (rsize
< KAUTH_FILESEC_SIZE(0)) {
2175 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize
);
2179 /* Validate the magic number before trying to convert it */
2180 host_fsec_magic
= ntohl(KAUTH_FILESEC_MAGIC
);
2181 if (fsec
->fsec_magic
!= host_fsec_magic
) {
2182 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic
);
2186 /* Validate the entry count before trying to convert it. */
2187 host_acl_entrycount
= ntohl(fsec
->fsec_acl
.acl_entrycount
);
2188 if (host_acl_entrycount
!= KAUTH_FILESEC_NOACL
) {
2189 if (host_acl_entrycount
> KAUTH_ACL_MAX_ENTRIES
) {
2190 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount
);
2193 if (KAUTH_FILESEC_SIZE(host_acl_entrycount
) > rsize
) {
2194 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount
, rsize
);
2199 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, NULL
);
2206 kauth_filesec_free(fsec
);
2207 if (fsec_uio
!= NULL
)
2215 * Set a filesec and optional acl contents into an extended attribute.
2216 * function will attempt to store ACL, UUID, and GUID information using a
2217 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2218 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2219 * original caller supplied an acl.
2221 * Parameters: vp The vnode on which to operate.
2222 * fsec The filesec being set.
2223 * acl The acl to be associated with 'fsec'.
2224 * ctx The vnode context in which the
2225 * operation is to be attempted.
2227 * Returns: 0 Success
2230 * Notes: Both the fsec and the acl are always valid.
2232 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2233 * as are the acl contents, if they are used. Internally, we will
2234 * cannonize these values into network (PPC) byte order before we
2235 * attempt to write them so that the on-disk contents of the
2236 * extended attribute are identical for both PPC and Intel (if we
2237 * were not being required to provide this service via fallback,
2238 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2239 * We reverse this process on the way out, so we leave with the
2240 * same byte order we started with.
2242 * XXX: We should enummerate the possible errno values here, and where
2243 * in the code they originated.
2246 vnode_set_filesec(vnode_t vp
, kauth_filesec_t fsec
, kauth_acl_t acl
, vfs_context_t ctx
)
2250 uint32_t saved_acl_copysize
;
2254 if ((fsec_uio
= uio_create(2, 0, UIO_SYSSPACE
, UIO_WRITE
)) == NULL
) {
2255 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2260 * Save the pre-converted ACL copysize, because it gets swapped too
2261 * if we are running with the wrong endianness.
2263 saved_acl_copysize
= KAUTH_ACL_COPYSIZE(acl
);
2265 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK
, fsec
, acl
);
2267 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL
));
2268 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(acl
), saved_acl_copysize
);
2269 error
= vn_setxattr(vp
,
2270 KAUTH_FILESEC_XATTR
,
2272 XATTR_NOSECURITY
, /* we have auth'ed already */
2274 VFS_DEBUG(ctx
, vp
, "SETATTR - set ACL returning %d", error
);
2276 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, acl
);
2279 if (fsec_uio
!= NULL
)
2286 * Returns: 0 Success
2287 * ENOMEM Not enough space [only if has filesec]
2289 * vnode_get_filesec: ???
2290 * kauth_cred_guid2uid: ???
2291 * kauth_cred_guid2gid: ???
2292 * vfs_update_vfsstat: ???
2295 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2297 kauth_filesec_t fsec
;
2303 /* don't ask for extended security data if the filesystem doesn't support it */
2304 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
2305 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
2306 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
2307 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
2311 * If the caller wants size values we might have to synthesise, give the
2312 * filesystem the opportunity to supply better intermediate results.
2314 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2315 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2316 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2317 VATTR_SET_ACTIVE(vap
, va_data_size
);
2318 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
2319 VATTR_SET_ACTIVE(vap
, va_total_size
);
2320 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
2323 error
= VNOP_GETATTR(vp
, vap
, ctx
);
2325 KAUTH_DEBUG("ERROR - returning %d", error
);
2330 * If extended security data was requested but not returned, try the fallback
2333 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2336 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
2337 /* try to get the filesec */
2338 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
2341 /* if no filesec, no attributes */
2343 VATTR_RETURN(vap
, va_acl
, NULL
);
2344 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
2345 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
2348 /* looks good, try to return what we were asked for */
2349 VATTR_RETURN(vap
, va_uuuid
, fsec
->fsec_owner
);
2350 VATTR_RETURN(vap
, va_guuid
, fsec
->fsec_group
);
2352 /* only return the ACL if we were actually asked for it */
2353 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2354 if (fsec
->fsec_acl
.acl_entrycount
== KAUTH_FILESEC_NOACL
) {
2355 VATTR_RETURN(vap
, va_acl
, NULL
);
2357 facl
= kauth_acl_alloc(fsec
->fsec_acl
.acl_entrycount
);
2359 kauth_filesec_free(fsec
);
2363 bcopy(&fsec
->fsec_acl
, facl
, KAUTH_ACL_COPYSIZE(&fsec
->fsec_acl
));
2364 VATTR_RETURN(vap
, va_acl
, facl
);
2367 kauth_filesec_free(fsec
);
2371 * If someone gave us an unsolicited filesec, toss it. We promise that
2372 * we're OK with a filesystem giving us anything back, but our callers
2373 * only expect what they asked for.
2375 if (VATTR_IS_SUPPORTED(vap
, va_acl
) && !VATTR_IS_ACTIVE(vap
, va_acl
)) {
2376 if (vap
->va_acl
!= NULL
)
2377 kauth_acl_free(vap
->va_acl
);
2378 VATTR_CLEAR_SUPPORTED(vap
, va_acl
);
2381 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2383 * Handle the case where we need a UID/GID, but only have extended
2384 * security information.
2386 if (VATTR_NOT_RETURNED(vap
, va_uid
) &&
2387 VATTR_IS_SUPPORTED(vap
, va_uuuid
) &&
2388 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
2389 if ((error
= kauth_cred_guid2uid(&vap
->va_uuuid
, &nuid
)) == 0)
2390 VATTR_RETURN(vap
, va_uid
, nuid
);
2392 if (VATTR_NOT_RETURNED(vap
, va_gid
) &&
2393 VATTR_IS_SUPPORTED(vap
, va_guuid
) &&
2394 !kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
2395 if ((error
= kauth_cred_guid2gid(&vap
->va_guuid
, &ngid
)) == 0)
2396 VATTR_RETURN(vap
, va_gid
, ngid
);
2401 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2403 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2404 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2406 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2407 nuid
= vp
->v_mount
->mnt_fsowner
;
2408 if (nuid
== KAUTH_UID_NONE
)
2410 } else if (VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2413 /* this will always be something sensible */
2414 nuid
= vp
->v_mount
->mnt_fsowner
;
2416 if ((nuid
== 99) && !vfs_context_issuser(ctx
))
2417 nuid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
2418 VATTR_RETURN(vap
, va_uid
, nuid
);
2420 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2421 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2423 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2424 ngid
= vp
->v_mount
->mnt_fsgroup
;
2425 if (ngid
== KAUTH_GID_NONE
)
2427 } else if (VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2430 /* this will always be something sensible */
2431 ngid
= vp
->v_mount
->mnt_fsgroup
;
2433 if ((ngid
== 99) && !vfs_context_issuser(ctx
))
2434 ngid
= kauth_cred_getgid(vfs_context_ucred(ctx
));
2435 VATTR_RETURN(vap
, va_gid
, ngid
);
2439 * Synthesise some values that can be reasonably guessed.
2441 if (!VATTR_IS_SUPPORTED(vap
, va_iosize
))
2442 VATTR_RETURN(vap
, va_iosize
, vp
->v_mount
->mnt_vfsstat
.f_iosize
);
2444 if (!VATTR_IS_SUPPORTED(vap
, va_flags
))
2445 VATTR_RETURN(vap
, va_flags
, 0);
2447 if (!VATTR_IS_SUPPORTED(vap
, va_filerev
))
2448 VATTR_RETURN(vap
, va_filerev
, 0);
2450 if (!VATTR_IS_SUPPORTED(vap
, va_gen
))
2451 VATTR_RETURN(vap
, va_gen
, 0);
2454 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2456 if (!VATTR_IS_SUPPORTED(vap
, va_data_size
))
2457 VATTR_RETURN(vap
, va_data_size
, 0);
2459 /* do we want any of the possibly-computed values? */
2460 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2461 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2462 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2463 /* make sure f_bsize is valid */
2464 if (vp
->v_mount
->mnt_vfsstat
.f_bsize
== 0) {
2465 if ((error
= vfs_update_vfsstat(vp
->v_mount
, ctx
, VFS_KERNEL_EVENT
)) != 0)
2469 /* default va_data_alloc from va_data_size */
2470 if (!VATTR_IS_SUPPORTED(vap
, va_data_alloc
))
2471 VATTR_RETURN(vap
, va_data_alloc
, roundup(vap
->va_data_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2473 /* default va_total_size from va_data_size */
2474 if (!VATTR_IS_SUPPORTED(vap
, va_total_size
))
2475 VATTR_RETURN(vap
, va_total_size
, vap
->va_data_size
);
2477 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2478 if (!VATTR_IS_SUPPORTED(vap
, va_total_alloc
))
2479 VATTR_RETURN(vap
, va_total_alloc
, roundup(vap
->va_total_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2483 * If we don't have a change time, pull it from the modtime.
2485 if (!VATTR_IS_SUPPORTED(vap
, va_change_time
) && VATTR_IS_SUPPORTED(vap
, va_modify_time
))
2486 VATTR_RETURN(vap
, va_change_time
, vap
->va_modify_time
);
2489 * This is really only supported for the creation VNOPs, but since the field is there
2490 * we should populate it correctly.
2492 VATTR_RETURN(vap
, va_type
, vp
->v_type
);
2495 * The fsid can be obtained from the mountpoint directly.
2497 VATTR_RETURN(vap
, va_fsid
, vp
->v_mount
->mnt_vfsstat
.f_fsid
.val
[0]);
2505 * Set the attributes on a vnode in a vnode context.
2507 * Parameters: vp The vnode whose attributes to set.
2508 * vap A pointer to the attributes to set.
2509 * ctx The vnode context in which the
2510 * operation is to be attempted.
2512 * Returns: 0 Success
2515 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2517 * The contents of the data area pointed to by 'vap' may be
2518 * modified if the vnode is on a filesystem which has been
2519 * mounted with ingore ownership flags, or by the underlyng
2520 * VFS itself, or by the fallback code, if the underlying VFS
2521 * does not support ACL, UUID, or GUUID attributes directly.
2523 * XXX: We should enummerate the possible errno values here, and where
2524 * in the code they originated.
2527 vnode_setattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2529 int error
, is_perm_change
=0;
2532 * Make sure the filesystem is mounted R/W.
2533 * If not, return an error.
2535 if (vfs_isrdonly(vp
->v_mount
)) {
2540 /* For streams, va_data_size is the only setable attribute. */
2541 if ((vp
->v_flag
& VISNAMEDSTREAM
) && (vap
->va_active
!= VNODE_ATTR_va_data_size
)) {
2548 * If ownership is being ignored on this volume, we silently discard
2549 * ownership changes.
2551 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2552 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
2553 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
2556 if ( VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
)
2557 || VATTR_IS_ACTIVE(vap
, va_mode
) || VATTR_IS_ACTIVE(vap
, va_acl
)) {
2562 * Make sure that extended security is enabled if we're going to try
2565 if (!vfs_extendedsecurity(vnode_mount(vp
)) &&
2566 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
2567 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2572 error
= VNOP_SETATTR(vp
, vap
, ctx
);
2574 if ((error
== 0) && !VATTR_ALL_SUPPORTED(vap
))
2575 error
= vnode_setattr_fallback(vp
, vap
, ctx
);
2578 // only send a stat_changed event if this is more than
2579 // just an access or backup time update
2580 if (error
== 0 && (vap
->va_active
!= VNODE_ATTR_BIT(va_access_time
)) && (vap
->va_active
!= VNODE_ATTR_BIT(va_backup_time
))) {
2581 if (is_perm_change
) {
2582 if (need_fsevent(FSE_CHOWN
, vp
)) {
2583 add_fsevent(FSE_CHOWN
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2585 } else if(need_fsevent(FSE_STAT_CHANGED
, vp
)) {
2586 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2596 * Fallback for setting the attributes on a vnode in a vnode context. This
2597 * Function will attempt to store ACL, UUID, and GUID information utilizing
2598 * a read/modify/write operation against an EA used as a backing store for
2601 * Parameters: vp The vnode whose attributes to set.
2602 * vap A pointer to the attributes to set.
2603 * ctx The vnode context in which the
2604 * operation is to be attempted.
2606 * Returns: 0 Success
2609 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2610 * as are the fsec and lfsec, if they are used.
2612 * The contents of the data area pointed to by 'vap' may be
2613 * modified to indicate that the attribute is supported for
2614 * any given requested attribute.
2616 * XXX: We should enummerate the possible errno values here, and where
2617 * in the code they originated.
2620 vnode_setattr_fallback(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2622 kauth_filesec_t fsec
;
2624 struct kauth_filesec lfsec
;
2630 * Extended security fallback via extended attributes.
2632 * Note that we do not free the filesec; the caller is expected to
2635 if (VATTR_NOT_RETURNED(vap
, va_acl
) ||
2636 VATTR_NOT_RETURNED(vap
, va_uuuid
) ||
2637 VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2638 VFS_DEBUG(ctx
, vp
, "SETATTR - doing filesec fallback");
2641 * Fail for file types that we don't permit extended security
2644 if ((vp
->v_type
!= VDIR
) && (vp
->v_type
!= VLNK
) && (vp
->v_type
!= VREG
)) {
2645 VFS_DEBUG(ctx
, vp
, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp
));
2651 * If we don't have all the extended security items, we need
2652 * to fetch the existing data to perform a read-modify-write
2656 if (!VATTR_IS_ACTIVE(vap
, va_acl
) ||
2657 !VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
2658 !VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2659 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0) {
2660 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error
);
2664 /* if we didn't get a filesec, use our local one */
2666 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2669 KAUTH_DEBUG("SETATTR - updating existing filesec");
2672 facl
= &fsec
->fsec_acl
;
2674 /* if we're using the local filesec, we need to initialise it */
2675 if (fsec
== &lfsec
) {
2676 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
2677 fsec
->fsec_owner
= kauth_null_guid
;
2678 fsec
->fsec_group
= kauth_null_guid
;
2679 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2680 facl
->acl_flags
= 0;
2684 * Update with the supplied attributes.
2686 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
2687 KAUTH_DEBUG("SETATTR - updating owner UUID");
2688 fsec
->fsec_owner
= vap
->va_uuuid
;
2689 VATTR_SET_SUPPORTED(vap
, va_uuuid
);
2691 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2692 KAUTH_DEBUG("SETATTR - updating group UUID");
2693 fsec
->fsec_group
= vap
->va_guuid
;
2694 VATTR_SET_SUPPORTED(vap
, va_guuid
);
2696 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2697 if (vap
->va_acl
== NULL
) {
2698 KAUTH_DEBUG("SETATTR - removing ACL");
2699 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2701 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap
->va_acl
->acl_entrycount
);
2704 VATTR_SET_SUPPORTED(vap
, va_acl
);
2708 * If the filesec data is all invalid, we can just remove
2709 * the EA completely.
2711 if ((facl
->acl_entrycount
== KAUTH_FILESEC_NOACL
) &&
2712 kauth_guid_equal(&fsec
->fsec_owner
, &kauth_null_guid
) &&
2713 kauth_guid_equal(&fsec
->fsec_group
, &kauth_null_guid
)) {
2714 error
= vn_removexattr(vp
, KAUTH_FILESEC_XATTR
, XATTR_NOSECURITY
, ctx
);
2715 /* no attribute is ok, nothing to delete */
2716 if (error
== ENOATTR
)
2718 VFS_DEBUG(ctx
, vp
, "SETATTR - remove filesec returning %d", error
);
2721 error
= vnode_set_filesec(vp
, fsec
, facl
, ctx
);
2722 VFS_DEBUG(ctx
, vp
, "SETATTR - update filesec returning %d", error
);
2725 /* if we fetched a filesec, dispose of the buffer */
2727 kauth_filesec_free(fsec
);
2735 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
/* NOTE(review): return type, braces, closing statements and the
 * CONFIG_FSEVENT conditional-compilation structure of this function are
 * missing from this extract; only the event-mask translation survives —
 * confirm against full source before editing. */
2739 vnode_notify(vnode_t vp
, uint32_t events
, struct vnode_attr
*vap
)
2741 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2742 uint32_t knote_mask
= (VNODE_EVENT_WRITE
| VNODE_EVENT_DELETE
| VNODE_EVENT_RENAME
2743 | VNODE_EVENT_LINK
| VNODE_EVENT_EXTEND
| VNODE_EVENT_ATTRIB
);
2744 uint32_t dir_contents_mask
= (VNODE_EVENT_DIR_CREATED
| VNODE_EVENT_FILE_CREATED
2745 | VNODE_EVENT_DIR_REMOVED
| VNODE_EVENT_FILE_REMOVED
);
2746 uint32_t knote_events
= (events
& knote_mask
);
2748 /* Permissions are not explicitly part of the kqueue model */
2749 if (events
& VNODE_EVENT_PERMS
) {
2750 knote_events
|= NOTE_ATTRIB
;
2753 /* Directory contents information just becomes NOTE_WRITE */
2754 if ((vnode_isdir(vp
)) && (events
& dir_contents_mask
)) {
2755 knote_events
|= NOTE_WRITE
;
2759 lock_vnode_and_post(vp
, knote_events
);
2762 create_fsevent_from_kevent(vp
, events
, vap
);
2773 * For a filesystem that isn't tracking its own vnode watchers:
2774 * check whether a vnode is being monitored.
2777 vnode_ismonitored(vnode_t vp
) {
2778 return (vp
->v_knotes
.slh_first
!= NULL
);
2782 * Conceived as a function available only in BSD kernel so that if kevent_register
2783 * changes what a knote of type EVFILT_VNODE is watching, it can push
2784 * that updated information down to a networked filesystem that may
2785 * need to update server-side monitoring.
2787 * Blunted to do nothing--because we want to get both kqueue and fsevents support
2788 * from the VNOP_MONITOR design, we always want all the events a filesystem can provide us.
/* NOTE(review): return type, braces and the matching vnode_put()/closing
 * lines are missing from this extract — confirm against full source. */
2791 vnode_knoteupdate(__unused
struct knote
*kn
)
2794 vnode_t vp
= (vnode_t
)kn
->kn_hook
;
2795 if (vnode_getwithvid(vp
, kn
->kn_hookid
) == 0) {
2796 VNOP_MONITOR(vp
, kn
->kn_sfflags
, VNODE_MONITOR_UPDATE
, (void*)kn
, NULL
);
2803 * Initialize a struct vnode_attr and activate the attributes required
2804 * by the vnode_notify() call.
/*
 * Sets va_active to the canned VNODE_NOTIFY_ATTRS mask so a caller can
 * VNOP_GETATTR exactly the attributes vnode_notify() wants.
 * NOTE(review): the VATTR_INIT preceding this assignment appears to be
 * in an elided line -- confirm against the full source.
 */
2807 vfs_get_notify_attributes(struct vnode_attr
*vap
)
2810 vap
->va_active
= VNODE_NOTIFY_ATTRS
;
2815 * Definition of vnode operations.
2821 *#% lookup dvp L ? ?
2822 *#% lookup vpp - L -
2824 struct vnop_lookup_args
{
2825 struct vnodeop_desc
*a_desc
;
2828 struct componentname
*a_cnp
;
2829 vfs_context_t a_context
;
2834 * Returns: 0 Success
2835 * lock_fsnode:ENOENT No such file or directory [only for VFS
2836 * that is not thread safe & vnode is
2837 * currently being/has been terminated]
2838 * <vfs_lookup>:ENAMETOOLONG
2839 * <vfs_lookup>:ENOENT
2840 * <vfs_lookup>:EJUSTRETURN
2841 * <vfs_lookup>:EPERM
2842 * <vfs_lookup>:EISDIR
2843 * <vfs_lookup>:ENOTDIR
2846 * Note: The return codes from the underlying VFS's lookup routine can't
2847 * be fully enumerated here, since third party VFS authors may not
2848 * limit their error returns to the ones documented here, even
2849 * though this may result in some programs functioning incorrectly.
2851 * The return codes documented above are those which may currently
2852 * be returned by HFS from hfs_lookup, not including additional
2853 * error code which may be propagated from underlying routines.
2856 VNOP_LOOKUP(vnode_t dvp
, vnode_t
*vpp
, struct componentname
*cnp
, vfs_context_t ctx
)
2859 struct vnop_lookup_args a
;
2863 int funnel_state
= 0;
2864 #endif /* __LP64__ */
2866 a
.a_desc
= &vnop_lookup_desc
;
2873 thread_safe
= THREAD_SAFE_FS(dvp
);
2875 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2879 #endif /* __LP64__ */
2881 _err
= (*dvp
->v_op
[vnop_lookup_desc
.vdesc_offset
])(&a
);
2887 if ( (cnp
->cn_flags
& ISLASTCN
) ) {
2888 if ( (cnp
->cn_flags
& LOCKPARENT
) ) {
2889 if ( !(cnp
->cn_flags
& FSNODELOCKHELD
) ) {
2891 * leave the fsnode lock held on
2892 * the directory, but restore the funnel...
2893 * also indicate that we need to drop the
2894 * fsnode_lock when we're done with the
2895 * system call processing for this path
2897 cnp
->cn_flags
|= FSNODELOCKHELD
;
2899 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2904 unlock_fsnode(dvp
, &funnel_state
);
2906 #endif /* __LP64__ */
2914 *#% create dvp L L L
2915 *#% create vpp - L -
2919 struct vnop_create_args
{
2920 struct vnodeop_desc
*a_desc
;
2923 struct componentname
*a_cnp
;
2924 struct vnode_attr
*a_vap
;
2925 vfs_context_t a_context
;
2929 VNOP_CREATE(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2932 struct vnop_create_args a
;
2935 int funnel_state
= 0;
2936 #endif /* __LP64__ */
2938 a
.a_desc
= &vnop_create_desc
;
2946 thread_safe
= THREAD_SAFE_FS(dvp
);
2948 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2952 #endif /* __LP64__ */
2954 _err
= (*dvp
->v_op
[vnop_create_desc
.vdesc_offset
])(&a
);
2955 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
2957 * Remove stale Apple Double file (if any).
2959 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 0);
2964 unlock_fsnode(dvp
, &funnel_state
);
2966 #endif /* __LP64__ */
2968 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
2976 *#% whiteout dvp L L L
2977 *#% whiteout cnp - - -
2978 *#% whiteout flag - - -
2981 struct vnop_whiteout_args
{
2982 struct vnodeop_desc
*a_desc
;
2984 struct componentname
*a_cnp
;
2986 vfs_context_t a_context
;
2990 VNOP_WHITEOUT(vnode_t dvp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
2993 struct vnop_whiteout_args a
;
2996 int funnel_state
= 0;
2997 #endif /* __LP64__ */
2999 a
.a_desc
= &vnop_whiteout_desc
;
3006 thread_safe
= THREAD_SAFE_FS(dvp
);
3008 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3012 #endif /* __LP64__ */
3014 _err
= (*dvp
->v_op
[vnop_whiteout_desc
.vdesc_offset
])(&a
);
3018 unlock_fsnode(dvp
, &funnel_state
);
3020 #endif /* __LP64__ */
3022 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
3034 struct vnop_mknod_args
{
3035 struct vnodeop_desc
*a_desc
;
3038 struct componentname
*a_cnp
;
3039 struct vnode_attr
*a_vap
;
3040 vfs_context_t a_context
;
3044 VNOP_MKNOD(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
3048 struct vnop_mknod_args a
;
3051 int funnel_state
= 0;
3052 #endif /* __LP64__ */
3054 a
.a_desc
= &vnop_mknod_desc
;
3062 thread_safe
= THREAD_SAFE_FS(dvp
);
3064 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3068 #endif /* __LP64__ */
3070 _err
= (*dvp
->v_op
[vnop_mknod_desc
.vdesc_offset
])(&a
);
3074 unlock_fsnode(dvp
, &funnel_state
);
3076 #endif /* __LP64__ */
3078 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
3089 struct vnop_open_args
{
3090 struct vnodeop_desc
*a_desc
;
3093 vfs_context_t a_context
;
3097 VNOP_OPEN(vnode_t vp
, int mode
, vfs_context_t ctx
)
3100 struct vnop_open_args a
;
3103 int funnel_state
= 0;
3104 #endif /* __LP64__ */
3107 ctx
= vfs_context_current();
3109 a
.a_desc
= &vnop_open_desc
;
3115 thread_safe
= THREAD_SAFE_FS(vp
);
3117 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3118 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3119 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3120 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3125 #endif /* __LP64__ */
3127 _err
= (*vp
->v_op
[vnop_open_desc
.vdesc_offset
])(&a
);
3131 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3132 unlock_fsnode(vp
, NULL
);
3134 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3136 #endif /* __LP64__ */
3147 struct vnop_close_args
{
3148 struct vnodeop_desc
*a_desc
;
3151 vfs_context_t a_context
;
3155 VNOP_CLOSE(vnode_t vp
, int fflag
, vfs_context_t ctx
)
3158 struct vnop_close_args a
;
3161 int funnel_state
= 0;
3162 #endif /* __LP64__ */
3165 ctx
= vfs_context_current();
3167 a
.a_desc
= &vnop_close_desc
;
3173 thread_safe
= THREAD_SAFE_FS(vp
);
3175 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3176 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3177 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3178 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3183 #endif /* __LP64__ */
3185 _err
= (*vp
->v_op
[vnop_close_desc
.vdesc_offset
])(&a
);
3189 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3190 unlock_fsnode(vp
, NULL
);
3192 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3194 #endif /* __LP64__ */
3205 struct vnop_access_args
{
3206 struct vnodeop_desc
*a_desc
;
3209 vfs_context_t a_context
;
3213 VNOP_ACCESS(vnode_t vp
, int action
, vfs_context_t ctx
)
3216 struct vnop_access_args a
;
3219 int funnel_state
= 0;
3220 #endif /* __LP64__ */
3223 ctx
= vfs_context_current();
3225 a
.a_desc
= &vnop_access_desc
;
3227 a
.a_action
= action
;
3231 thread_safe
= THREAD_SAFE_FS(vp
);
3233 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3237 #endif /* __LP64__ */
3239 _err
= (*vp
->v_op
[vnop_access_desc
.vdesc_offset
])(&a
);
3243 unlock_fsnode(vp
, &funnel_state
);
3245 #endif /* __LP64__ */
3253 *#% getattr vp = = =
3256 struct vnop_getattr_args
{
3257 struct vnodeop_desc
*a_desc
;
3259 struct vnode_attr
*a_vap
;
3260 vfs_context_t a_context
;
3264 VNOP_GETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
3267 struct vnop_getattr_args a
;
3270 int funnel_state
= 0;
3271 #endif /* __LP64__ */
3273 a
.a_desc
= &vnop_getattr_desc
;
3279 thread_safe
= THREAD_SAFE_FS(vp
);
3281 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3285 #endif /* __LP64__ */
3287 _err
= (*vp
->v_op
[vnop_getattr_desc
.vdesc_offset
])(&a
);
3291 unlock_fsnode(vp
, &funnel_state
);
3293 #endif /* __LP64__ */
3301 *#% setattr vp L L L
3304 struct vnop_setattr_args
{
3305 struct vnodeop_desc
*a_desc
;
3307 struct vnode_attr
*a_vap
;
3308 vfs_context_t a_context
;
3312 VNOP_SETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
3315 struct vnop_setattr_args a
;
3318 int funnel_state
= 0;
3319 #endif /* __LP64__ */
3321 a
.a_desc
= &vnop_setattr_desc
;
3327 thread_safe
= THREAD_SAFE_FS(vp
);
3329 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3333 #endif /* __LP64__ */
3335 _err
= (*vp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
3338 * Shadow uid/gid/mod change to extended attribute file.
3340 if (_err
== 0 && !NATIVE_XATTR(vp
)) {
3341 struct vnode_attr va
;
3345 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
3346 VATTR_SET(&va
, va_uid
, vap
->va_uid
);
3349 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
3350 VATTR_SET(&va
, va_gid
, vap
->va_gid
);
3353 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
3354 VATTR_SET(&va
, va_mode
, vap
->va_mode
);
3361 dvp
= vnode_getparent(vp
);
3362 vname
= vnode_getname(vp
);
3364 xattrfile_setattr(dvp
, vname
, &va
, ctx
);
3368 vnode_putname(vname
);
3374 unlock_fsnode(vp
, &funnel_state
);
3376 #endif /* __LP64__ */
3379 * If we have changed any of the things about the file that are likely
3380 * to result in changes to authorization results, blow the vnode auth
3384 VATTR_IS_SUPPORTED(vap
, va_mode
) ||
3385 VATTR_IS_SUPPORTED(vap
, va_uid
) ||
3386 VATTR_IS_SUPPORTED(vap
, va_gid
) ||
3387 VATTR_IS_SUPPORTED(vap
, va_flags
) ||
3388 VATTR_IS_SUPPORTED(vap
, va_acl
) ||
3389 VATTR_IS_SUPPORTED(vap
, va_uuuid
) ||
3390 VATTR_IS_SUPPORTED(vap
, va_guuid
))) {
3391 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
3394 if (vfs_authopaque(vp
->v_mount
) && vnode_hasnamedstreams(vp
)) {
3396 if (vnode_getnamedstream(vp
, &svp
, XATTR_RESOURCEFORK_NAME
, NS_OPEN
, 0, ctx
) == 0) {
3397 vnode_uncache_authorized_action(svp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
3401 #endif /* NAMEDSTREAMS */
3405 post_event_if_success(vp
, _err
, NOTE_ATTRIB
);
3417 struct vnop_read_args
{
3418 struct vnodeop_desc
*a_desc
;
3422 vfs_context_t a_context
;
3426 VNOP_READ(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3429 struct vnop_read_args a
;
3432 int funnel_state
= 0;
3433 #endif /* __LP64__ */
3436 ctx
= vfs_context_current();
3439 a
.a_desc
= &vnop_read_desc
;
3442 a
.a_ioflag
= ioflag
;
3446 thread_safe
= THREAD_SAFE_FS(vp
);
3448 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3449 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3450 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3451 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3456 #endif /* __LP64__ */
3458 _err
= (*vp
->v_op
[vnop_read_desc
.vdesc_offset
])(&a
);
3462 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3463 unlock_fsnode(vp
, NULL
);
3465 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3467 #endif /* __LP64__ */
3479 struct vnop_write_args
{
3480 struct vnodeop_desc
*a_desc
;
3484 vfs_context_t a_context
;
3488 VNOP_WRITE(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3490 struct vnop_write_args a
;
3494 int funnel_state
= 0;
3495 #endif /* __LP64__ */
3498 ctx
= vfs_context_current();
3501 a
.a_desc
= &vnop_write_desc
;
3504 a
.a_ioflag
= ioflag
;
3508 thread_safe
= THREAD_SAFE_FS(vp
);
3510 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3511 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3512 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3513 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3518 #endif /* __LP64__ */
3520 _err
= (*vp
->v_op
[vnop_write_desc
.vdesc_offset
])(&a
);
3524 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3525 unlock_fsnode(vp
, NULL
);
3527 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3529 #endif /* __LP64__ */
3531 post_event_if_success(vp
, _err
, NOTE_WRITE
);
3543 struct vnop_ioctl_args
{
3544 struct vnodeop_desc
*a_desc
;
3549 vfs_context_t a_context
;
3553 VNOP_IOCTL(vnode_t vp
, u_long command
, caddr_t data
, int fflag
, vfs_context_t ctx
)
3556 struct vnop_ioctl_args a
;
3559 int funnel_state
= 0;
3560 #endif /* __LP64__ */
3563 ctx
= vfs_context_current();
3567 * This check should probably have been put in the TTY code instead...
3569 * We have to be careful about what we assume during startup and shutdown.
3570 * We have to be able to use the root filesystem's device vnode even when
3571 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3572 * structure. If there is no data pointer, it doesn't matter whether
3573 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
3574 * which passes NULL for its data pointer can therefore be used during
3575 * mount or unmount of the root filesystem.
3577 * Depending on what root filesystems need to do during mount/unmount, we
3578 * may need to loosen this check again in the future.
3580 if (vfs_context_is64bit(ctx
) && !(vnode_ischr(vp
) || vnode_isblk(vp
))) {
3581 if (data
!= NULL
&& !vnode_vfs64bitready(vp
)) {
3586 a
.a_desc
= &vnop_ioctl_desc
;
3588 a
.a_command
= command
;
3594 thread_safe
= THREAD_SAFE_FS(vp
);
3596 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3597 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3598 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3599 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3604 #endif /* __LP64__ */
3606 _err
= (*vp
->v_op
[vnop_ioctl_desc
.vdesc_offset
])(&a
);
3610 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3611 unlock_fsnode(vp
, NULL
);
3613 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3615 #endif /* __LP64__ */
3627 struct vnop_select_args
{
3628 struct vnodeop_desc
*a_desc
;
3633 vfs_context_t a_context
;
3637 VNOP_SELECT(vnode_t vp
, int which
, int fflags
, void * wql
, vfs_context_t ctx
)
3640 struct vnop_select_args a
;
3643 int funnel_state
= 0;
3644 #endif /* __LP64__ */
3647 ctx
= vfs_context_current();
3649 a
.a_desc
= &vnop_select_desc
;
3652 a
.a_fflags
= fflags
;
3657 thread_safe
= THREAD_SAFE_FS(vp
);
3659 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3660 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3661 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3662 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3667 #endif /* __LP64__ */
3669 _err
= (*vp
->v_op
[vnop_select_desc
.vdesc_offset
])(&a
);
3673 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3674 unlock_fsnode(vp
, NULL
);
3676 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3678 #endif /* __LP64__ */
3687 *#% exchange fvp L L L
3688 *#% exchange tvp L L L
3691 struct vnop_exchange_args
{
3692 struct vnodeop_desc
*a_desc
;
3696 vfs_context_t a_context
;
3700 VNOP_EXCHANGE(vnode_t fvp
, vnode_t tvp
, int options
, vfs_context_t ctx
)
3703 struct vnop_exchange_args a
;
3706 int funnel_state
= 0;
3707 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3708 #endif /* __LP64__ */
3710 a
.a_desc
= &vnop_exchange_desc
;
3713 a
.a_options
= options
;
3717 thread_safe
= THREAD_SAFE_FS(fvp
);
3720 * Lock in vnode address order to avoid deadlocks
3729 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) ) {
3732 if ( (_err
= lock_fsnode(lock_second
, NULL
)) ) {
3733 unlock_fsnode(lock_first
, &funnel_state
);
3737 #endif /* __LP64__ */
3739 _err
= (*fvp
->v_op
[vnop_exchange_desc
.vdesc_offset
])(&a
);
3743 unlock_fsnode(lock_second
, NULL
);
3744 unlock_fsnode(lock_first
, &funnel_state
);
3746 #endif /* __LP64__ */
3748 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3749 post_event_if_success(fvp
, _err
, NOTE_ATTRIB
);
3750 post_event_if_success(tvp
, _err
, NOTE_ATTRIB
);
3762 struct vnop_revoke_args
{
3763 struct vnodeop_desc
*a_desc
;
3766 vfs_context_t a_context
;
3770 VNOP_REVOKE(vnode_t vp
, int flags
, vfs_context_t ctx
)
3772 struct vnop_revoke_args a
;
3776 int funnel_state
= 0;
3777 #endif /* __LP64__ */
3779 a
.a_desc
= &vnop_revoke_desc
;
3785 thread_safe
= THREAD_SAFE_FS(vp
);
3787 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3789 #endif /* __LP64__ */
3791 _err
= (*vp
->v_op
[vnop_revoke_desc
.vdesc_offset
])(&a
);
3795 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3797 #endif /* __LP64__ */
3809 struct vnop_mmap_args
{
3810 struct vnodeop_desc
*a_desc
;
3813 vfs_context_t a_context
;
3817 VNOP_MMAP(vnode_t vp
, int fflags
, vfs_context_t ctx
)
3820 struct vnop_mmap_args a
;
3823 int funnel_state
= 0;
3824 #endif /* __LP64__ */
3826 a
.a_desc
= &vnop_mmap_desc
;
3828 a
.a_fflags
= fflags
;
3832 thread_safe
= THREAD_SAFE_FS(vp
);
3834 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3838 #endif /* __LP64__ */
3840 _err
= (*vp
->v_op
[vnop_mmap_desc
.vdesc_offset
])(&a
);
3844 unlock_fsnode(vp
, &funnel_state
);
3846 #endif /* __LP64__ */
3855 *# mnomap - vp U U U
3858 struct vnop_mnomap_args
{
3859 struct vnodeop_desc
*a_desc
;
3861 vfs_context_t a_context
;
/*
 * VNOP_MNOMAP: tell the filesystem the last mmap reference on vp is gone.
 * Standard pre-__LP64__ wrapper shape: for a non-thread-safe FS, take the
 * funnel/fsnode lock around the indirect call through vp->v_op[], then
 * release it.  NOTE(review): the #ifndef __LP64__ heads, error-path
 * braces, and the final return of _err are in elided lines.
 */
3865 VNOP_MNOMAP(vnode_t vp
, vfs_context_t ctx
)
3868 struct vnop_mnomap_args a
;
3871 int funnel_state
= 0;
3872 #endif /* __LP64__ */
3874 a
.a_desc
= &vnop_mnomap_desc
;
/* Decide whether this FS needs the funnel-era fsnode locking. */
3879 thread_safe
= THREAD_SAFE_FS(vp
);
3881 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3885 #endif /* __LP64__ */
/* Dispatch to the filesystem's mnomap entry point. */
3887 _err
= (*vp
->v_op
[vnop_mnomap_desc
.vdesc_offset
])(&a
);
3891 unlock_fsnode(vp
, &funnel_state
);
3893 #endif /* __LP64__ */
3905 struct vnop_fsync_args
{
3906 struct vnodeop_desc
*a_desc
;
3909 vfs_context_t a_context
;
/*
 * VNOP_FSYNC: ask the filesystem to flush vp's dirty data; waitfor
 * selects synchronous vs. asynchronous semantics (passed through
 * unchanged in a_waitfor).  Same non-thread-safe-FS locking wrapper
 * pattern as the other VNOP_* calls here.  NOTE(review): the
 * #ifndef __LP64__ heads and the return of _err are in elided lines.
 */
3913 VNOP_FSYNC(vnode_t vp
, int waitfor
, vfs_context_t ctx
)
3915 struct vnop_fsync_args a
;
3919 int funnel_state
= 0;
3920 #endif /* __LP64__ */
3922 a
.a_desc
= &vnop_fsync_desc
;
3924 a
.a_waitfor
= waitfor
;
/* Funnel-era locking for filesystems that are not thread safe. */
3928 thread_safe
= THREAD_SAFE_FS(vp
);
3930 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3934 #endif /* __LP64__ */
/* Dispatch to the filesystem's fsync entry point. */
3936 _err
= (*vp
->v_op
[vnop_fsync_desc
.vdesc_offset
])(&a
);
3940 unlock_fsnode(vp
, &funnel_state
);
3942 #endif /* __LP64__ */
3951 *#% remove dvp L U U
3955 struct vnop_remove_args
{
3956 struct vnodeop_desc
*a_desc
;
3959 struct componentname
*a_cnp
;
3961 vfs_context_t a_context
;
3965 VNOP_REMOVE(vnode_t dvp
, vnode_t vp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
3968 struct vnop_remove_args a
;
3971 int funnel_state
= 0;
3972 #endif /* __LP64__ */
3974 a
.a_desc
= &vnop_remove_desc
;
3982 thread_safe
= THREAD_SAFE_FS(dvp
);
3984 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3988 #endif /* __LP64__ */
3990 _err
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3993 vnode_setneedinactive(vp
);
3995 if ( !(NATIVE_XATTR(dvp
)) ) {
3997 * Remove any associated extended attribute file (._ AppleDouble file).
3999 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 1);
4005 unlock_fsnode(vp
, &funnel_state
);
4007 #endif /* __LP64__ */
4009 post_event_if_success(vp
, _err
, NOTE_DELETE
| NOTE_LINK
);
4010 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
4023 struct vnop_link_args
{
4024 struct vnodeop_desc
*a_desc
;
4027 struct componentname
*a_cnp
;
4028 vfs_context_t a_context
;
4032 VNOP_LINK(vnode_t vp
, vnode_t tdvp
, struct componentname
* cnp
, vfs_context_t ctx
)
4035 struct vnop_link_args a
;
4038 int funnel_state
= 0;
4039 #endif /* __LP64__ */
4042 * For file systems with non-native extended attributes,
4043 * disallow linking to an existing "._" Apple Double file.
4045 if ( !NATIVE_XATTR(tdvp
) && (vp
->v_type
== VREG
)) {
4048 vname
= vnode_getname(vp
);
4049 if (vname
!= NULL
) {
4051 if (vname
[0] == '.' && vname
[1] == '_' && vname
[2] != '\0') {
4054 vnode_putname(vname
);
4059 a
.a_desc
= &vnop_link_desc
;
4066 thread_safe
= THREAD_SAFE_FS(vp
);
4068 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4072 #endif /* __LP64__ */
4074 _err
= (*tdvp
->v_op
[vnop_link_desc
.vdesc_offset
])(&a
);
4078 unlock_fsnode(vp
, &funnel_state
);
4080 #endif /* __LP64__ */
4082 post_event_if_success(vp
, _err
, NOTE_LINK
);
4083 post_event_if_success(tdvp
, _err
, NOTE_WRITE
);
4092 *#% rename fdvp U U U
4093 *#% rename fvp U U U
4094 *#% rename tdvp L U U
4095 *#% rename tvp X U U
4098 struct vnop_rename_args
{
4099 struct vnodeop_desc
*a_desc
;
4102 struct componentname
*a_fcnp
;
4105 struct componentname
*a_tcnp
;
4106 vfs_context_t a_context
;
4110 VNOP_RENAME(struct vnode
*fdvp
, struct vnode
*fvp
, struct componentname
*fcnp
,
4111 struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
4116 struct vnop_rename_args a
;
4117 char smallname1
[48];
4118 char smallname2
[48];
4119 char *xfromname
= NULL
;
4120 char *xtoname
= NULL
;
4122 int funnel_state
= 0;
4123 vnode_t lock_first
= NULL
, lock_second
= NULL
;
4124 vnode_t fdvp_unsafe
= NULLVP
;
4125 vnode_t tdvp_unsafe
= NULLVP
;
4126 #endif /* __LP64__ */
4127 vnode_t src_attr_vp
= NULLVP
;
4128 vnode_t dst_attr_vp
= NULLVP
;
4129 struct nameidata fromnd
;
4130 struct nameidata tond
;
4132 a
.a_desc
= &vnop_rename_desc
;
4142 if (!THREAD_SAFE_FS(fdvp
))
4144 if (!THREAD_SAFE_FS(tdvp
))
4147 if (fdvp_unsafe
!= NULLVP
) {
4149 * Lock parents in vnode address order to avoid deadlocks
4150 * note that it's possible for the fdvp to be unsafe,
4151 * but the tdvp to be safe because tvp could be a directory
4152 * in the root of a filesystem... in that case, tdvp is the
4153 * in the filesystem that this root is mounted on
4155 if (tdvp_unsafe
== NULL
|| fdvp_unsafe
== tdvp_unsafe
) {
4156 lock_first
= fdvp_unsafe
;
4158 } else if (fdvp_unsafe
< tdvp_unsafe
) {
4159 lock_first
= fdvp_unsafe
;
4160 lock_second
= tdvp_unsafe
;
4162 lock_first
= tdvp_unsafe
;
4163 lock_second
= fdvp_unsafe
;
4165 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) )
4168 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
4169 unlock_fsnode(lock_first
, &funnel_state
);
4174 * Lock both children in vnode address order to avoid deadlocks
4176 if (tvp
== NULL
|| tvp
== fvp
) {
4179 } else if (fvp
< tvp
) {
4186 if ( (_err
= lock_fsnode(lock_first
, NULL
)) )
4189 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
4190 unlock_fsnode(lock_first
, NULL
);
4194 #endif /* __LP64__ */
4197 * We need to preflight any potential AppleDouble file for the source file
4198 * before doing the rename operation, since we could potentially be doing
4199 * this operation on a network filesystem, and would end up duplicating
4200 * the work. Also, save the source and destination names. Skip it if the
4201 * source has a "._" prefix.
4204 if (!NATIVE_XATTR(fdvp
) &&
4205 !(fcnp
->cn_nameptr
[0] == '.' && fcnp
->cn_nameptr
[1] == '_')) {
4209 /* Get source attribute file name. */
4210 len
= fcnp
->cn_namelen
+ 3;
4211 if (len
> sizeof(smallname1
)) {
4212 MALLOC(xfromname
, char *, len
, M_TEMP
, M_WAITOK
);
4214 xfromname
= &smallname1
[0];
4216 strlcpy(xfromname
, "._", min(sizeof smallname1
, len
));
4217 strncat(xfromname
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
);
4218 xfromname
[len
-1] = '\0';
4220 /* Get destination attribute file name. */
4221 len
= tcnp
->cn_namelen
+ 3;
4222 if (len
> sizeof(smallname2
)) {
4223 MALLOC(xtoname
, char *, len
, M_TEMP
, M_WAITOK
);
4225 xtoname
= &smallname2
[0];
4227 strlcpy(xtoname
, "._", min(sizeof smallname2
, len
));
4228 strncat(xtoname
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
);
4229 xtoname
[len
-1] = '\0';
4232 * Look up source attribute file, keep reference on it if exists.
4233 * Note that we do the namei with the nameiop of RENAME, which is different than
4234 * in the rename syscall. It's OK if the source file does not exist, since this
4235 * is only for AppleDouble files.
4237 if (xfromname
!= NULL
) {
4238 NDINIT(&fromnd
, RENAME
, NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
4239 CAST_USER_ADDR_T(xfromname
), ctx
);
4240 fromnd
.ni_dvp
= fdvp
;
4241 error
= namei(&fromnd
);
4244 * If there was an error looking up source attribute file,
4245 * we'll behave as if it didn't exist.
4250 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4251 src_attr_vp
= fromnd
.ni_vp
;
4253 if (fromnd
.ni_vp
->v_type
!= VREG
) {
4254 src_attr_vp
= NULLVP
;
4255 vnode_put(fromnd
.ni_vp
);
4259 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4260 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4261 * have a vnode here, so we drop our namei buffer for the source attribute file
4263 if (src_attr_vp
== NULLVP
) {
4271 /* do the rename of the main file. */
4272 _err
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
4275 if (fdvp_unsafe
!= NULLVP
) {
4276 if (lock_second
!= NULL
)
4277 unlock_fsnode(lock_second
, NULL
);
4278 unlock_fsnode(lock_first
, NULL
);
4280 #endif /* __LP64__ */
4283 if (tvp
&& tvp
!= fvp
)
4284 vnode_setneedinactive(tvp
);
4288 * Rename any associated extended attribute file (._ AppleDouble file).
4290 if (_err
== 0 && !NATIVE_XATTR(fdvp
) && xfromname
!= NULL
) {
4294 * Get destination attribute file vnode.
4295 * Note that tdvp already has an iocount reference. Make sure to check that we
4296 * get a valid vnode from namei.
4298 NDINIT(&tond
, RENAME
,
4299 NOCACHE
| NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
4300 CAST_USER_ADDR_T(xtoname
), ctx
);
4302 error
= namei(&tond
);
4308 dst_attr_vp
= tond
.ni_vp
;
4312 /* attempt to rename src -> dst */
4314 a
.a_desc
= &vnop_rename_desc
;
4316 a
.a_fvp
= src_attr_vp
;
4317 a
.a_fcnp
= &fromnd
.ni_cnd
;
4319 a
.a_tvp
= dst_attr_vp
;
4320 a
.a_tcnp
= &tond
.ni_cnd
;
4324 if (fdvp_unsafe
!= NULLVP
) {
4326 * Lock in vnode address order to avoid deadlocks
4328 if (dst_attr_vp
== NULL
|| dst_attr_vp
== src_attr_vp
) {
4329 lock_first
= src_attr_vp
;
4331 } else if (src_attr_vp
< dst_attr_vp
) {
4332 lock_first
= src_attr_vp
;
4333 lock_second
= dst_attr_vp
;
4335 lock_first
= dst_attr_vp
;
4336 lock_second
= src_attr_vp
;
4338 if ( (error
= lock_fsnode(lock_first
, NULL
)) == 0) {
4339 if (lock_second
!= NULL
&& (error
= lock_fsnode(lock_second
, NULL
)) )
4340 unlock_fsnode(lock_first
, NULL
);
4343 #endif /* __LP64__ */
4348 /* Save these off so we can later verify them (fix up below) */
4349 oname
= src_attr_vp
->v_name
;
4350 oparent
= src_attr_vp
->v_parent
;
4352 error
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
4355 if (fdvp_unsafe
!= NULLVP
) {
4356 if (lock_second
!= NULL
)
4357 unlock_fsnode(lock_second
, NULL
);
4358 unlock_fsnode(lock_first
, NULL
);
4360 #endif /* __LP64__ */
4363 vnode_setneedinactive(src_attr_vp
);
4365 if (dst_attr_vp
&& dst_attr_vp
!= src_attr_vp
)
4366 vnode_setneedinactive(dst_attr_vp
);
4368 * Fix up name & parent pointers on ._ file
4370 if (oname
== src_attr_vp
->v_name
&&
4371 oparent
== src_attr_vp
->v_parent
) {
4374 update_flags
= VNODE_UPDATE_NAME
;
4377 update_flags
|= VNODE_UPDATE_PARENT
;
4379 vnode_update_identity(src_attr_vp
, tdvp
,
4380 tond
.ni_cnd
.cn_nameptr
,
4381 tond
.ni_cnd
.cn_namelen
,
4382 tond
.ni_cnd
.cn_hash
,
4387 /* kevent notifications for moving resource files
4388 * _err is zero if we're here, so no need to notify directories, code
4389 * below will do that. only need to post the rename on the source and
4390 * possibly a delete on the dest
4392 post_event_if_success(src_attr_vp
, error
, NOTE_RENAME
);
4394 post_event_if_success(dst_attr_vp
, error
, NOTE_DELETE
);
4397 } else if (dst_attr_vp
) {
4399 * Just delete destination attribute file vnode if it exists, since
4400 * we didn't have a source attribute file.
4401 * Note that tdvp already has an iocount reference.
4404 struct vnop_remove_args args
;
4406 args
.a_desc
= &vnop_remove_desc
;
4408 args
.a_vp
= dst_attr_vp
;
4409 args
.a_cnp
= &tond
.ni_cnd
;
4410 args
.a_context
= ctx
;
4413 if (fdvp_unsafe
!= NULLVP
)
4414 error
= lock_fsnode(dst_attr_vp
, NULL
);
4415 #endif /* __LP64__ */
4417 error
= (*tdvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&args
);
4420 if (fdvp_unsafe
!= NULLVP
)
4421 unlock_fsnode(dst_attr_vp
, NULL
);
4422 #endif /* __LP64__ */
4425 vnode_setneedinactive(dst_attr_vp
);
4428 /* kevent notification for deleting the destination's attribute file
4429 * if it existed. Only need to post the delete on the destination, since
4430 * the code below will handle the directories.
4432 post_event_if_success(dst_attr_vp
, error
, NOTE_DELETE
);
4437 vnode_put(src_attr_vp
);
4441 vnode_put(dst_attr_vp
);
4445 if (xfromname
&& xfromname
!= &smallname1
[0]) {
4446 FREE(xfromname
, M_TEMP
);
4448 if (xtoname
&& xtoname
!= &smallname2
[0]) {
4449 FREE(xtoname
, M_TEMP
);
4454 if (fdvp_unsafe
!= NULLVP
) {
4455 if (tdvp_unsafe
!= NULLVP
)
4456 unlock_fsnode(tdvp_unsafe
, NULL
);
4457 unlock_fsnode(fdvp_unsafe
, &funnel_state
);
4459 #endif /* __LP64__ */
4461 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4463 events
= NOTE_WRITE
;
4464 if (vnode_isdir(fvp
)) {
4465 /* Link count on dir changed only if we are moving a dir and...
4466 * --Moved to new dir, not overwriting there
4467 * --Kept in same dir and DID overwrite
4469 if (((fdvp
!= tdvp
) && (!tvp
)) || ((fdvp
== tdvp
) && (tvp
))) {
4470 events
|= NOTE_LINK
;
4474 lock_vnode_and_post(fdvp
, events
);
4476 lock_vnode_and_post(tdvp
, events
);
4479 /* If you're replacing the target, post a deletion for it */
4482 lock_vnode_and_post(tvp
, NOTE_DELETE
);
4485 lock_vnode_and_post(fvp
, NOTE_RENAME
);
4498 struct vnop_mkdir_args
{
4499 struct vnodeop_desc
*a_desc
;
4502 struct componentname
*a_cnp
;
4503 struct vnode_attr
*a_vap
;
4504 vfs_context_t a_context
;
4508 VNOP_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
4509 struct vnode_attr
*vap
, vfs_context_t ctx
)
4512 struct vnop_mkdir_args a
;
4515 int funnel_state
= 0;
4516 #endif /* __LP64__ */
4518 a
.a_desc
= &vnop_mkdir_desc
;
4526 thread_safe
= THREAD_SAFE_FS(dvp
);
4528 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
4532 #endif /* __LP64__ */
4534 _err
= (*dvp
->v_op
[vnop_mkdir_desc
.vdesc_offset
])(&a
);
4535 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
4537 * Remove stale Apple Double file (if any).
4539 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 0);
4544 unlock_fsnode(dvp
, &funnel_state
);
4546 #endif /* __LP64__ */
4548 post_event_if_success(dvp
, _err
, NOTE_LINK
| NOTE_WRITE
);
4561 struct vnop_rmdir_args
{
4562 struct vnodeop_desc
*a_desc
;
4565 struct componentname
*a_cnp
;
4566 vfs_context_t a_context
;
4571 VNOP_RMDIR(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
, vfs_context_t ctx
)
4574 struct vnop_rmdir_args a
;
4577 int funnel_state
= 0;
4578 #endif /* __LP64__ */
4580 a
.a_desc
= &vnop_rmdir_desc
;
4587 thread_safe
= THREAD_SAFE_FS(dvp
);
4589 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4593 #endif /* __LP64__ */
4595 _err
= (*vp
->v_op
[vnop_rmdir_desc
.vdesc_offset
])(&a
);
4598 vnode_setneedinactive(vp
);
4600 if ( !(NATIVE_XATTR(dvp
)) ) {
4602 * Remove any associated extended attribute file (._ AppleDouble file).
4604 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 1);
4610 unlock_fsnode(vp
, &funnel_state
);
4612 #endif /* __LP64__ */
4614 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4615 post_event_if_success(vp
, _err
, NOTE_DELETE
| NOTE_LINK
);
4616 post_event_if_success(dvp
, _err
, NOTE_LINK
| NOTE_WRITE
);
4622 * Remove a ._ AppleDouble file
4624 #define AD_STALE_SECS (180)
4626 xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t ctx
, int force
)
4629 struct nameidata nd
;
4631 char *filename
= NULL
;
4634 if ((basename
== NULL
) || (basename
[0] == '\0') ||
4635 (basename
[0] == '.' && basename
[1] == '_')) {
4638 filename
= &smallname
[0];
4639 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
4640 if (len
>= sizeof(smallname
)) {
4641 len
++; /* snprintf result doesn't include '\0' */
4642 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
4643 len
= snprintf(filename
, len
, "._%s", basename
);
4645 NDINIT(&nd
, DELETE
, WANTPARENT
| LOCKLEAF
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
4646 CAST_USER_ADDR_T(filename
), ctx
);
4648 if (namei(&nd
) != 0)
4653 if (xvp
->v_type
!= VREG
)
4657 * When creating a new object and a "._" file already
4658 * exists, check to see if its a stale "._" file.
4662 struct vnode_attr va
;
4665 VATTR_WANTED(&va
, va_data_size
);
4666 VATTR_WANTED(&va
, va_modify_time
);
4667 if (VNOP_GETATTR(xvp
, &va
, ctx
) == 0 &&
4668 VATTR_IS_SUPPORTED(&va
, va_data_size
) &&
4669 VATTR_IS_SUPPORTED(&va
, va_modify_time
) &&
4670 va
.va_data_size
!= 0) {
4674 if ((tv
.tv_sec
> va
.va_modify_time
.tv_sec
) &&
4675 (tv
.tv_sec
- va
.va_modify_time
.tv_sec
) > AD_STALE_SECS
) {
4676 force
= 1; /* must be stale */
4681 struct vnop_remove_args a
;
4684 int thread_safe
= THREAD_SAFE_FS(dvp
);
4685 #endif /* __LP64__ */
4687 a
.a_desc
= &vnop_remove_desc
;
4688 a
.a_dvp
= nd
.ni_dvp
;
4690 a
.a_cnp
= &nd
.ni_cnd
;
4695 if ( (lock_fsnode(xvp
, NULL
)) )
4698 #endif /* __LP64__ */
4700 error
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
4704 unlock_fsnode(xvp
, NULL
);
4705 #endif /* __LP64__ */
4708 vnode_setneedinactive(xvp
);
4710 post_event_if_success(xvp
, error
, NOTE_DELETE
);
4711 post_event_if_success(dvp
, error
, NOTE_WRITE
);
4718 if (filename
&& filename
!= &smallname
[0]) {
4719 FREE(filename
, M_TEMP
);
4724 * Shadow uid/gid/mod to a ._ AppleDouble file
4727 xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
4731 struct nameidata nd
;
4733 char *filename
= NULL
;
4736 if ((dvp
== NULLVP
) ||
4737 (basename
== NULL
) || (basename
[0] == '\0') ||
4738 (basename
[0] == '.' && basename
[1] == '_')) {
4741 filename
= &smallname
[0];
4742 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
4743 if (len
>= sizeof(smallname
)) {
4744 len
++; /* snprintf result doesn't include '\0' */
4745 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
4746 len
= snprintf(filename
, len
, "._%s", basename
);
4748 NDINIT(&nd
, LOOKUP
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
4749 CAST_USER_ADDR_T(filename
), ctx
);
4751 if (namei(&nd
) != 0)
4757 if (xvp
->v_type
== VREG
) {
4759 int thread_safe
= THREAD_SAFE_FS(dvp
);
4760 #endif /* __LP64__ */
4761 struct vnop_setattr_args a
;
4763 a
.a_desc
= &vnop_setattr_desc
;
4770 if ( (lock_fsnode(xvp
, NULL
)) )
4773 #endif /* __LP64__ */
4775 (void) (*xvp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
4779 unlock_fsnode(xvp
, NULL
);
4781 #endif /* __LP64__ */
4787 #endif /* __LP64__ */
4791 if (filename
&& filename
!= &smallname
[0]) {
4792 FREE(filename
, M_TEMP
);
4799 *#% symlink dvp L U U
4800 *#% symlink vpp - U -
4803 struct vnop_symlink_args
{
4804 struct vnodeop_desc
*a_desc
;
4807 struct componentname
*a_cnp
;
4808 struct vnode_attr
*a_vap
;
4810 vfs_context_t a_context
;
4815 VNOP_SYMLINK(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
4816 struct vnode_attr
*vap
, char *target
, vfs_context_t ctx
)
4819 struct vnop_symlink_args a
;
4822 int funnel_state
= 0;
4823 #endif /* __LP64__ */
4825 a
.a_desc
= &vnop_symlink_desc
;
4830 a
.a_target
= target
;
4834 thread_safe
= THREAD_SAFE_FS(dvp
);
4836 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
4840 #endif /* __LP64__ */
4842 _err
= (*dvp
->v_op
[vnop_symlink_desc
.vdesc_offset
])(&a
);
4843 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
4845 * Remove stale Apple Double file (if any). Posts its own knotes
4847 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 0);
4853 unlock_fsnode(dvp
, &funnel_state
);
4855 #endif /* __LP64__ */
4857 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
4865 *#% readdir vp L L L
4868 struct vnop_readdir_args
{
4869 struct vnodeop_desc
*a_desc
;
4875 vfs_context_t a_context
;
4880 VNOP_READDIR(struct vnode
*vp
, struct uio
*uio
, int flags
, int *eofflag
,
4881 int *numdirent
, vfs_context_t ctx
)
4884 struct vnop_readdir_args a
;
4887 int funnel_state
= 0;
4888 #endif /* __LP64__ */
4890 a
.a_desc
= &vnop_readdir_desc
;
4894 a
.a_eofflag
= eofflag
;
4895 a
.a_numdirent
= numdirent
;
4898 thread_safe
= THREAD_SAFE_FS(vp
);
4901 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4905 #endif /* __LP64__ */
4907 _err
= (*vp
->v_op
[vnop_readdir_desc
.vdesc_offset
])(&a
);
4911 unlock_fsnode(vp
, &funnel_state
);
4913 #endif /* __LP64__ */
4920 *#% readdirattr vp L L L
4923 struct vnop_readdirattr_args
{
4924 struct vnodeop_desc
*a_desc
;
4926 struct attrlist
*a_alist
;
4928 uint32_t a_maxcount
;
4930 uint32_t *a_newstate
;
4932 uint32_t *a_actualcount
;
4933 vfs_context_t a_context
;
4938 VNOP_READDIRATTR(struct vnode
*vp
, struct attrlist
*alist
, struct uio
*uio
, uint32_t maxcount
,
4939 uint32_t options
, uint32_t *newstate
, int *eofflag
, uint32_t *actualcount
, vfs_context_t ctx
)
4942 struct vnop_readdirattr_args a
;
4945 int funnel_state
= 0;
4946 #endif /* __LP64__ */
4948 a
.a_desc
= &vnop_readdirattr_desc
;
4952 a
.a_maxcount
= maxcount
;
4953 a
.a_options
= options
;
4954 a
.a_newstate
= newstate
;
4955 a
.a_eofflag
= eofflag
;
4956 a
.a_actualcount
= actualcount
;
4960 thread_safe
= THREAD_SAFE_FS(vp
);
4962 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4966 #endif /* __LP64__ */
4968 _err
= (*vp
->v_op
[vnop_readdirattr_desc
.vdesc_offset
])(&a
);
4972 unlock_fsnode(vp
, &funnel_state
);
4974 #endif /* __LP64__ */
4982 *#% readlink vp L L L
4985 struct vnop_readlink_args
{
4986 struct vnodeop_desc
*a_desc
;
4989 vfs_context_t a_context
;
4994 * Returns: 0 Success
4995 * lock_fsnode:ENOENT No such file or directory [only for VFS
4996 * that is not thread safe & vnode is
4997 * currently being/has been terminated]
4998 * <vfs_readlink>:EINVAL
4999 * <vfs_readlink>:???
5001 * Note: The return codes from the underlying VFS's readlink routine
5002 * can't be fully enumerated here, since third party VFS authors
5003 * may not limit their error returns to the ones documented here,
5004 * even though this may result in some programs functioning
5007 * The return codes documented above are those which may currently
5008 * be returned by HFS from hfs_vnop_readlink, not including
5009 * additional error code which may be propagated from underlying
5013 VNOP_READLINK(struct vnode
*vp
, struct uio
*uio
, vfs_context_t ctx
)
5016 struct vnop_readlink_args a
;
5019 int funnel_state
= 0;
5020 #endif /* __LP64__ */
5022 a
.a_desc
= &vnop_readlink_desc
;
5028 thread_safe
= THREAD_SAFE_FS(vp
);
5030 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5034 #endif /* __LP64__ */
5036 _err
= (*vp
->v_op
[vnop_readlink_desc
.vdesc_offset
])(&a
);
5040 unlock_fsnode(vp
, &funnel_state
);
5042 #endif /* __LP64__ */
5050 *#% inactive vp L U U
5053 struct vnop_inactive_args
{
5054 struct vnodeop_desc
*a_desc
;
5056 vfs_context_t a_context
;
5060 VNOP_INACTIVE(struct vnode
*vp
, vfs_context_t ctx
)
5063 struct vnop_inactive_args a
;
5066 int funnel_state
= 0;
5067 #endif /* __LP64__ */
5069 a
.a_desc
= &vnop_inactive_desc
;
5074 thread_safe
= THREAD_SAFE_FS(vp
);
5076 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5080 #endif /* __LP64__ */
5082 _err
= (*vp
->v_op
[vnop_inactive_desc
.vdesc_offset
])(&a
);
5086 unlock_fsnode(vp
, &funnel_state
);
5088 #endif /* __LP64__ */
5091 /* For file systems that do not support namedstream natively, mark
5092 * the shadow stream file vnode to be recycled as soon as the last
5093 * reference goes away. To avoid re-entering reclaim code, do not
5094 * call recycle on terminating namedstream vnodes.
5096 if (vnode_isnamedstream(vp
) &&
5097 (vp
->v_parent
!= NULLVP
) &&
5098 vnode_isshadow(vp
) &&
5099 ((vp
->v_lflag
& VL_TERMINATE
) == 0)) {
5111 *#% reclaim vp U U U
5114 struct vnop_reclaim_args
{
5115 struct vnodeop_desc
*a_desc
;
5117 vfs_context_t a_context
;
5121 VNOP_RECLAIM(struct vnode
*vp
, vfs_context_t ctx
)
5124 struct vnop_reclaim_args a
;
5127 int funnel_state
= 0;
5128 #endif /* __LP64__ */
5130 a
.a_desc
= &vnop_reclaim_desc
;
5135 thread_safe
= THREAD_SAFE_FS(vp
);
5137 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5139 #endif /* __LP64__ */
5141 _err
= (*vp
->v_op
[vnop_reclaim_desc
.vdesc_offset
])(&a
);
5145 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5147 #endif /* __LP64__ */
5154 * Returns: 0 Success
5155 * lock_fsnode:ENOENT No such file or directory [only for VFS
5156 * that is not thread safe & vnode is
5157 * currently being/has been terminated]
5158 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5163 *#% pathconf vp L L L
5166 struct vnop_pathconf_args
{
5167 struct vnodeop_desc
*a_desc
;
5171 vfs_context_t a_context
;
5175 VNOP_PATHCONF(struct vnode
*vp
, int name
, int32_t *retval
, vfs_context_t ctx
)
5178 struct vnop_pathconf_args a
;
5181 int funnel_state
= 0;
5182 #endif /* __LP64__ */
5184 a
.a_desc
= &vnop_pathconf_desc
;
5187 a
.a_retval
= retval
;
5191 thread_safe
= THREAD_SAFE_FS(vp
);
5193 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5197 #endif /* __LP64__ */
5199 _err
= (*vp
->v_op
[vnop_pathconf_desc
.vdesc_offset
])(&a
);
5203 unlock_fsnode(vp
, &funnel_state
);
5205 #endif /* __LP64__ */
5211 * Returns: 0 Success
5212 * err_advlock:ENOTSUP
5214 * <vnop_advlock_desc>:???
5216 * Notes: VFS implementations of advisory locking using calls through
5217 * <vnop_advlock_desc> because lock enforcement does not occur
5218 * locally should try to limit themselves to the return codes
5219 * documented above for lf_advlock and err_advlock.
5224 *#% advlock vp U U U
5227 struct vnop_advlock_args
{
5228 struct vnodeop_desc
*a_desc
;
5234 vfs_context_t a_context
;
5238 VNOP_ADVLOCK(struct vnode
*vp
, caddr_t id
, int op
, struct flock
*fl
, int flags
, vfs_context_t ctx
)
5241 struct vnop_advlock_args a
;
5244 int funnel_state
= 0;
5245 #endif /* __LP64__ */
5247 a
.a_desc
= &vnop_advlock_desc
;
5256 thread_safe
= THREAD_SAFE_FS(vp
);
5258 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5260 #endif /* __LP64__ */
5262 /* Disallow advisory locking on non-seekable vnodes */
5263 if (vnode_isfifo(vp
)) {
5264 _err
= err_advlock(&a
);
5266 if ((vp
->v_flag
& VLOCKLOCAL
)) {
5267 /* Advisory locking done at this layer */
5268 _err
= lf_advlock(&a
);
5270 /* Advisory locking done by underlying filesystem */
5271 _err
= (*vp
->v_op
[vnop_advlock_desc
.vdesc_offset
])(&a
);
5277 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5279 #endif /* __LP64__ */
5289 *#% allocate vp L L L
5292 struct vnop_allocate_args
{
5293 struct vnodeop_desc
*a_desc
;
5297 off_t
*a_bytesallocated
;
5299 vfs_context_t a_context
;
5304 VNOP_ALLOCATE(struct vnode
*vp
, off_t length
, u_int32_t flags
, off_t
*bytesallocated
, off_t offset
, vfs_context_t ctx
)
5307 struct vnop_allocate_args a
;
5310 int funnel_state
= 0;
5311 #endif /* __LP64__ */
5313 a
.a_desc
= &vnop_allocate_desc
;
5315 a
.a_length
= length
;
5317 a
.a_bytesallocated
= bytesallocated
;
5318 a
.a_offset
= offset
;
5322 thread_safe
= THREAD_SAFE_FS(vp
);
5324 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5328 #endif /* __LP64__ */
5330 _err
= (*vp
->v_op
[vnop_allocate_desc
.vdesc_offset
])(&a
);
5333 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
5339 unlock_fsnode(vp
, &funnel_state
);
5341 #endif /* __LP64__ */
5352 struct vnop_pagein_args
{
5353 struct vnodeop_desc
*a_desc
;
5356 upl_offset_t a_pl_offset
;
5360 vfs_context_t a_context
;
5364 VNOP_PAGEIN(struct vnode
*vp
, upl_t pl
, upl_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
5367 struct vnop_pagein_args a
;
5370 int funnel_state
= 0;
5371 #endif /* __LP64__ */
5373 a
.a_desc
= &vnop_pagein_desc
;
5376 a
.a_pl_offset
= pl_offset
;
5377 a
.a_f_offset
= f_offset
;
5383 thread_safe
= THREAD_SAFE_FS(vp
);
5385 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5387 #endif /* __LP64__ */
5389 _err
= (*vp
->v_op
[vnop_pagein_desc
.vdesc_offset
])(&a
);
5393 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5395 #endif /* __LP64__ */
5403 *#% pageout vp = = =
5406 struct vnop_pageout_args
{
5407 struct vnodeop_desc
*a_desc
;
5410 upl_offset_t a_pl_offset
;
5414 vfs_context_t a_context
;
5419 VNOP_PAGEOUT(struct vnode
*vp
, upl_t pl
, upl_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
5422 struct vnop_pageout_args a
;
5425 int funnel_state
= 0;
5426 #endif /* __LP64__ */
5428 a
.a_desc
= &vnop_pageout_desc
;
5431 a
.a_pl_offset
= pl_offset
;
5432 a
.a_f_offset
= f_offset
;
5438 thread_safe
= THREAD_SAFE_FS(vp
);
5440 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5442 #endif /* __LP64__ */
5444 _err
= (*vp
->v_op
[vnop_pageout_desc
.vdesc_offset
])(&a
);
5448 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5450 #endif /* __LP64__ */
5452 post_event_if_success(vp
, _err
, NOTE_WRITE
);
5461 *#% searchfs vp L L L
5464 struct vnop_searchfs_args
{
5465 struct vnodeop_desc
*a_desc
;
5467 void *a_searchparams1
;
5468 void *a_searchparams2
;
5469 struct attrlist
*a_searchattrs
;
5470 uint32_t a_maxmatches
;
5471 struct timeval
*a_timelimit
;
5472 struct attrlist
*a_returnattrs
;
5473 uint32_t *a_nummatches
;
5474 uint32_t a_scriptcode
;
5477 struct searchstate
*a_searchstate
;
5478 vfs_context_t a_context
;
5483 VNOP_SEARCHFS(struct vnode
*vp
, void *searchparams1
, void *searchparams2
, struct attrlist
*searchattrs
, uint32_t maxmatches
, struct timeval
*timelimit
, struct attrlist
*returnattrs
, uint32_t *nummatches
, uint32_t scriptcode
, uint32_t options
, struct uio
*uio
, struct searchstate
*searchstate
, vfs_context_t ctx
)
5486 struct vnop_searchfs_args a
;
5489 int funnel_state
= 0;
5490 #endif /* __LP64__ */
5492 a
.a_desc
= &vnop_searchfs_desc
;
5494 a
.a_searchparams1
= searchparams1
;
5495 a
.a_searchparams2
= searchparams2
;
5496 a
.a_searchattrs
= searchattrs
;
5497 a
.a_maxmatches
= maxmatches
;
5498 a
.a_timelimit
= timelimit
;
5499 a
.a_returnattrs
= returnattrs
;
5500 a
.a_nummatches
= nummatches
;
5501 a
.a_scriptcode
= scriptcode
;
5502 a
.a_options
= options
;
5504 a
.a_searchstate
= searchstate
;
5508 thread_safe
= THREAD_SAFE_FS(vp
);
5510 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5514 #endif /* __LP64__ */
5516 _err
= (*vp
->v_op
[vnop_searchfs_desc
.vdesc_offset
])(&a
);
5520 unlock_fsnode(vp
, &funnel_state
);
5522 #endif /* __LP64__ */
5530 *#% copyfile fvp U U U
5531 *#% copyfile tdvp L U U
5532 *#% copyfile tvp X U U
5535 struct vnop_copyfile_args
{
5536 struct vnodeop_desc
*a_desc
;
5540 struct componentname
*a_tcnp
;
5543 vfs_context_t a_context
;
5547 VNOP_COPYFILE(struct vnode
*fvp
, struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
5548 int mode
, int flags
, vfs_context_t ctx
)
5551 struct vnop_copyfile_args a
;
5552 a
.a_desc
= &vnop_copyfile_desc
;
5560 _err
= (*fvp
->v_op
[vnop_copyfile_desc
.vdesc_offset
])(&a
);
5565 VNOP_GETXATTR(vnode_t vp
, const char *name
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
5567 struct vnop_getxattr_args a
;
5571 int funnel_state
= 0;
5572 #endif /* __LP64__ */
5574 a
.a_desc
= &vnop_getxattr_desc
;
5579 a
.a_options
= options
;
5583 thread_safe
= THREAD_SAFE_FS(vp
);
5585 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
5589 #endif /* __LP64__ */
5591 error
= (*vp
->v_op
[vnop_getxattr_desc
.vdesc_offset
])(&a
);
5595 unlock_fsnode(vp
, &funnel_state
);
5597 #endif /* __LP64__ */
5603 VNOP_SETXATTR(vnode_t vp
, const char *name
, uio_t uio
, int options
, vfs_context_t ctx
)
5605 struct vnop_setxattr_args a
;
5609 int funnel_state
= 0;
5610 #endif /* __LP64__ */
5612 a
.a_desc
= &vnop_setxattr_desc
;
5616 a
.a_options
= options
;
5620 thread_safe
= THREAD_SAFE_FS(vp
);
5622 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
5626 #endif /* __LP64__ */
5628 error
= (*vp
->v_op
[vnop_setxattr_desc
.vdesc_offset
])(&a
);
5632 unlock_fsnode(vp
, &funnel_state
);
5634 #endif /* __LP64__ */
5637 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
5639 post_event_if_success(vp
, error
, NOTE_ATTRIB
);
5645 VNOP_REMOVEXATTR(vnode_t vp
, const char *name
, int options
, vfs_context_t ctx
)
5647 struct vnop_removexattr_args a
;
5651 int funnel_state
= 0;
5652 #endif /* __LP64__ */
5654 a
.a_desc
= &vnop_removexattr_desc
;
5657 a
.a_options
= options
;
5661 thread_safe
= THREAD_SAFE_FS(vp
);
5663 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
5667 #endif /* __LP64__ */
5669 error
= (*vp
->v_op
[vnop_removexattr_desc
.vdesc_offset
])(&a
);
5673 unlock_fsnode(vp
, &funnel_state
);
5675 #endif /* __LP64__ */
5677 post_event_if_success(vp
, error
, NOTE_ATTRIB
);
5683 VNOP_LISTXATTR(vnode_t vp
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
5685 struct vnop_listxattr_args a
;
5689 int funnel_state
= 0;
5690 #endif /* __LP64__ */
5692 a
.a_desc
= &vnop_listxattr_desc
;
5696 a
.a_options
= options
;
5700 thread_safe
= THREAD_SAFE_FS(vp
);
5702 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
5706 #endif /* __LP64__ */
5708 error
= (*vp
->v_op
[vnop_listxattr_desc
.vdesc_offset
])(&a
);
5712 unlock_fsnode(vp
, &funnel_state
);
5714 #endif /* __LP64__ */
5723 *#% blktooff vp = = =
5726 struct vnop_blktooff_args
{
5727 struct vnodeop_desc
*a_desc
;
5734 VNOP_BLKTOOFF(struct vnode
*vp
, daddr64_t lblkno
, off_t
*offset
)
5737 struct vnop_blktooff_args a
;
5740 int funnel_state
= 0;
5741 #endif /* __LP64__ */
5743 a
.a_desc
= &vnop_blktooff_desc
;
5745 a
.a_lblkno
= lblkno
;
5746 a
.a_offset
= offset
;
5749 thread_safe
= THREAD_SAFE_FS(vp
);
5751 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5753 #endif /* __LP64__ */
5755 _err
= (*vp
->v_op
[vnop_blktooff_desc
.vdesc_offset
])(&a
);
5759 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5761 #endif /* __LP64__ */
5769 *#% offtoblk vp = = =
5772 struct vnop_offtoblk_args
{
5773 struct vnodeop_desc
*a_desc
;
5776 daddr64_t
*a_lblkno
;
5780 VNOP_OFFTOBLK(struct vnode
*vp
, off_t offset
, daddr64_t
*lblkno
)
5783 struct vnop_offtoblk_args a
;
5786 int funnel_state
= 0;
5787 #endif /* __LP64__ */
5789 a
.a_desc
= &vnop_offtoblk_desc
;
5791 a
.a_offset
= offset
;
5792 a
.a_lblkno
= lblkno
;
5795 thread_safe
= THREAD_SAFE_FS(vp
);
5797 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5799 #endif /* __LP64__ */
5801 _err
= (*vp
->v_op
[vnop_offtoblk_desc
.vdesc_offset
])(&a
);
5805 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5807 #endif /* __LP64__ */
5815 *#% blockmap vp L L L
5818 struct vnop_blockmap_args
{
5819 struct vnodeop_desc
*a_desc
;
5827 vfs_context_t a_context
;
5831 VNOP_BLOCKMAP(struct vnode
*vp
, off_t foffset
, size_t size
, daddr64_t
*bpn
, size_t *run
, void *poff
, int flags
, vfs_context_t ctx
)
5834 struct vnop_blockmap_args a
;
5837 int funnel_state
= 0;
5838 #endif /* __LP64__ */
5841 ctx
= vfs_context_current();
5843 a
.a_desc
= &vnop_blockmap_desc
;
5845 a
.a_foffset
= foffset
;
5854 thread_safe
= THREAD_SAFE_FS(vp
);
5856 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5858 #endif /* __LP64__ */
5860 _err
= (*vp
->v_op
[vnop_blockmap_desc
.vdesc_offset
])(&a
);
5864 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5866 #endif /* __LP64__ */
5872 struct vnop_strategy_args
{
5873 struct vnodeop_desc
*a_desc
;
5879 VNOP_STRATEGY(struct buf
*bp
)
5882 struct vnop_strategy_args a
;
5883 a
.a_desc
= &vnop_strategy_desc
;
5885 _err
= (*buf_vnode(bp
)->v_op
[vnop_strategy_desc
.vdesc_offset
])(&a
);
5890 struct vnop_bwrite_args
{
5891 struct vnodeop_desc
*a_desc
;
5896 VNOP_BWRITE(struct buf
*bp
)
5899 struct vnop_bwrite_args a
;
5900 a
.a_desc
= &vnop_bwrite_desc
;
5902 _err
= (*buf_vnode(bp
)->v_op
[vnop_bwrite_desc
.vdesc_offset
])(&a
);
5907 struct vnop_kqfilt_add_args
{
5908 struct vnodeop_desc
*a_desc
;
5911 vfs_context_t a_context
;
5915 VNOP_KQFILT_ADD(struct vnode
*vp
, struct knote
*kn
, vfs_context_t ctx
)
5918 struct vnop_kqfilt_add_args a
;
5921 int funnel_state
= 0;
5922 #endif /* __LP64__ */
5924 a
.a_desc
= VDESC(vnop_kqfilt_add
);
5930 thread_safe
= THREAD_SAFE_FS(vp
);
5932 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5936 #endif /* __LP64__ */
5938 _err
= (*vp
->v_op
[vnop_kqfilt_add_desc
.vdesc_offset
])(&a
);
5942 unlock_fsnode(vp
, &funnel_state
);
5944 #endif /* __LP64__ */
5950 struct vnop_kqfilt_remove_args
{
5951 struct vnodeop_desc
*a_desc
;
5954 vfs_context_t a_context
;
5958 VNOP_KQFILT_REMOVE(struct vnode
*vp
, uintptr_t ident
, vfs_context_t ctx
)
5961 struct vnop_kqfilt_remove_args a
;
5964 int funnel_state
= 0;
5965 #endif /* __LP64__ */
5967 a
.a_desc
= VDESC(vnop_kqfilt_remove
);
5973 thread_safe
= THREAD_SAFE_FS(vp
);
5975 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5979 #endif /* __LP64__ */
5981 _err
= (*vp
->v_op
[vnop_kqfilt_remove_desc
.vdesc_offset
])(&a
);
5985 unlock_fsnode(vp
, &funnel_state
);
5987 #endif /* __LP64__ */
5993 VNOP_MONITOR(vnode_t vp
, uint32_t events
, uint32_t flags
, void *handle
, vfs_context_t ctx
)
5996 struct vnop_monitor_args a
;
5999 int funnel_state
= 0;
6000 #endif /* __LP64__ */
6002 a
.a_desc
= VDESC(vnop_monitor
);
6004 a
.a_events
= events
;
6006 a
.a_handle
= handle
;
6010 thread_safe
= THREAD_SAFE_FS(vp
);
6012 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
6016 #endif /* __LP64__ */
6018 _err
= (*vp
->v_op
[vnop_monitor_desc
.vdesc_offset
])(&a
);
6022 unlock_fsnode(vp
, &funnel_state
);
6024 #endif /* __LP64__ */
6030 struct vnop_setlabel_args
{
6031 struct vnodeop_desc
*a_desc
;
6034 vfs_context_t a_context
;
6038 VNOP_SETLABEL(struct vnode
*vp
, struct label
*label
, vfs_context_t ctx
)
6041 struct vnop_setlabel_args a
;
6044 int funnel_state
= 0;
6045 #endif /* __LP64__ */
6047 a
.a_desc
= VDESC(vnop_setlabel
);
6053 thread_safe
= THREAD_SAFE_FS(vp
);
6055 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
6059 #endif /* __LP64__ */
6061 _err
= (*vp
->v_op
[vnop_setlabel_desc
.vdesc_offset
])(&a
);
6065 unlock_fsnode(vp
, &funnel_state
);
6067 #endif /* __LP64__ */
6075 * Get a named streamed
6078 VNOP_GETNAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, enum nsoperation operation
, int flags
, vfs_context_t ctx
)
6080 struct vnop_getnamedstream_args a
;
6083 if (!THREAD_SAFE_FS(vp
))
6085 #endif /* __LP64__ */
6087 a
.a_desc
= &vnop_getnamedstream_desc
;
6091 a
.a_operation
= operation
;
6095 return (*vp
->v_op
[vnop_getnamedstream_desc
.vdesc_offset
])(&a
);
6099 * Create a named streamed
6102 VNOP_MAKENAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, int flags
, vfs_context_t ctx
)
6104 struct vnop_makenamedstream_args a
;
6107 if (!THREAD_SAFE_FS(vp
))
6109 #endif /* __LP64__ */
6111 a
.a_desc
= &vnop_makenamedstream_desc
;
6118 return (*vp
->v_op
[vnop_makenamedstream_desc
.vdesc_offset
])(&a
);
6123 * Remove a named streamed
6126 VNOP_REMOVENAMEDSTREAM(vnode_t vp
, vnode_t svp
, const char *name
, int flags
, vfs_context_t ctx
)
6128 struct vnop_removenamedstream_args a
;
6131 if (!THREAD_SAFE_FS(vp
))
6133 #endif /* __LP64__ */
6135 a
.a_desc
= &vnop_removenamedstream_desc
;
6142 return (*vp
->v_op
[vnop_removenamedstream_desc
.vdesc_offset
])(&a
);