2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
76 * External virtual filesystem routines
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
87 #include <sys/vnode_internal.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
96 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/fsevents.h>
102 #include <sys/user.h>
103 #include <sys/lockf.h>
104 #include <sys/xattr.h>
106 #include <kern/assert.h>
107 #include <kern/kalloc.h>
108 #include <kern/task.h>
110 #include <libkern/OSByteOrder.h>
112 #include <miscfs/specfs/specdev.h>
114 #include <mach/mach_types.h>
115 #include <mach/memory_object_types.h>
116 #include <mach/task.h>
119 #include <security/mac_framework.h>
129 #define THREAD_SAFE_FS(VP) \
130 ((VP)->v_unsafefs ? 0 : 1)
132 #define NATIVE_XATTR(VP) \
133 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
135 static void xattrfile_remove(vnode_t dvp
, const char *basename
,
136 vfs_context_t ctx
, int thread_safe
, int force
);
137 static void xattrfile_setattr(vnode_t dvp
, const char * basename
,
138 struct vnode_attr
* vap
, vfs_context_t ctx
,
143 vnode_setneedinactive(vnode_t vp
)
148 vp
->v_lflag
|= VL_NEEDINACTIVE
;
154 lock_fsnode(vnode_t vp
, int *funnel_state
)
157 *funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
159 if (vp
->v_unsafefs
) {
160 if (vp
->v_unsafefs
->fsnodeowner
== current_thread()) {
161 vp
->v_unsafefs
->fsnode_count
++;
163 lck_mtx_lock(&vp
->v_unsafefs
->fsnodelock
);
165 if (vp
->v_lflag
& (VL_TERMWANT
| VL_TERMINATE
| VL_DEAD
)) {
166 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
169 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
172 vp
->v_unsafefs
->fsnodeowner
= current_thread();
173 vp
->v_unsafefs
->fsnode_count
= 1;
181 unlock_fsnode(vnode_t vp
, int *funnel_state
)
183 if (vp
->v_unsafefs
) {
184 if (--vp
->v_unsafefs
->fsnode_count
== 0) {
185 vp
->v_unsafefs
->fsnodeowner
= NULL
;
186 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
190 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
195 /* ====================================================================== */
196 /* ************ EXTERNAL KERNEL APIS ********************************** */
197 /* ====================================================================== */
200 * prototypes for exported VFS operations
203 VFS_MOUNT(mount_t mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t ctx
)
207 int funnel_state
= 0;
209 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_mount
== 0))
212 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
216 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
219 if (vfs_context_is64bit(ctx
)) {
220 if (vfs_64bitready(mp
)) {
221 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
228 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
232 (void) thread_funnel_set(kernel_flock
, funnel_state
);
238 VFS_START(mount_t mp
, int flags
, vfs_context_t ctx
)
242 int funnel_state
= 0;
244 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_start
== 0))
247 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
250 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
252 error
= (*mp
->mnt_op
->vfs_start
)(mp
, flags
, ctx
);
254 (void) thread_funnel_set(kernel_flock
, funnel_state
);
260 VFS_UNMOUNT(mount_t mp
, int flags
, vfs_context_t ctx
)
264 int funnel_state
= 0;
266 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_unmount
== 0))
269 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
272 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
274 error
= (*mp
->mnt_op
->vfs_unmount
)(mp
, flags
, ctx
);
276 (void) thread_funnel_set(kernel_flock
, funnel_state
);
283 * ENOTSUP Not supported
287 * Note: The return codes from the underlying VFS's root routine can't
288 * be fully enumerated here, since third party VFS authors may not
289 * limit their error returns to the ones documented here, even
290 * though this may result in some programs functioning incorrectly.
292 * The return codes documented above are those which may currently
293 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
294 * for a call to hfs_vget on the volume mount point, not including
295 * additional error codes which may be propagated from underlying
296 * routines called by hfs_vget.
299 VFS_ROOT(mount_t mp
, struct vnode
** vpp
, vfs_context_t ctx
)
303 int funnel_state
= 0;
305 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_root
== 0))
309 ctx
= vfs_context_current();
311 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
314 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
316 error
= (*mp
->mnt_op
->vfs_root
)(mp
, vpp
, ctx
);
318 (void) thread_funnel_set(kernel_flock
, funnel_state
);
324 VFS_QUOTACTL(mount_t mp
, int cmd
, uid_t uid
, caddr_t datap
, vfs_context_t ctx
)
328 int funnel_state
= 0;
330 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_quotactl
== 0))
333 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
336 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
338 error
= (*mp
->mnt_op
->vfs_quotactl
)(mp
, cmd
, uid
, datap
, ctx
);
340 (void) thread_funnel_set(kernel_flock
, funnel_state
);
346 VFS_GETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
350 int funnel_state
= 0;
352 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_getattr
== 0))
356 ctx
= vfs_context_current();
359 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
362 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
364 error
= (*mp
->mnt_op
->vfs_getattr
)(mp
, vfa
, ctx
);
366 (void) thread_funnel_set(kernel_flock
, funnel_state
);
372 VFS_SETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
376 int funnel_state
= 0;
378 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_setattr
== 0))
382 ctx
= vfs_context_current();
385 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
388 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
390 error
= (*mp
->mnt_op
->vfs_setattr
)(mp
, vfa
, ctx
);
392 (void) thread_funnel_set(kernel_flock
, funnel_state
);
398 VFS_SYNC(mount_t mp
, int flags
, vfs_context_t ctx
)
402 int funnel_state
= 0;
404 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_sync
== 0))
408 ctx
= vfs_context_current();
410 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
413 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
415 error
= (*mp
->mnt_op
->vfs_sync
)(mp
, flags
, ctx
);
417 (void) thread_funnel_set(kernel_flock
, funnel_state
);
423 VFS_VGET(mount_t mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t ctx
)
427 int funnel_state
= 0;
429 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_vget
== 0))
433 ctx
= vfs_context_current();
435 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
438 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
440 error
= (*mp
->mnt_op
->vfs_vget
)(mp
, ino
, vpp
, ctx
);
442 (void) thread_funnel_set(kernel_flock
, funnel_state
);
448 VFS_FHTOVP(mount_t mp
, int fhlen
, unsigned char * fhp
, vnode_t
* vpp
, vfs_context_t ctx
)
452 int funnel_state
= 0;
454 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_fhtovp
== 0))
458 ctx
= vfs_context_current();
460 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
463 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
465 error
= (*mp
->mnt_op
->vfs_fhtovp
)(mp
, fhlen
, fhp
, vpp
, ctx
);
467 (void) thread_funnel_set(kernel_flock
, funnel_state
);
473 VFS_VPTOFH(struct vnode
* vp
, int *fhlenp
, unsigned char * fhp
, vfs_context_t ctx
)
477 int funnel_state
= 0;
479 if ((vp
->v_mount
== dead_mountp
) || (vp
->v_mount
->mnt_op
->vfs_vptofh
== 0))
483 ctx
= vfs_context_current();
485 thread_safe
= THREAD_SAFE_FS(vp
);
488 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
490 error
= (*vp
->v_mount
->mnt_op
->vfs_vptofh
)(vp
, fhlenp
, fhp
, ctx
);
492 (void) thread_funnel_set(kernel_flock
, funnel_state
);
498 /* returns a copy of vfs type name for the mount_t */
500 vfs_name(mount_t mp
, char * buffer
)
502 strncpy(buffer
, mp
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
505 /* returns vfs type number for the mount_t */
507 vfs_typenum(mount_t mp
)
509 return(mp
->mnt_vtable
->vfc_typenum
);
513 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
515 vfs_flags(mount_t mp
)
517 return((uint64_t)(mp
->mnt_flag
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
)));
520 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
522 vfs_setflags(mount_t mp
, uint64_t flags
)
524 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
527 mp
->mnt_flag
|= lflags
;
531 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
533 vfs_clearflags(mount_t mp
, uint64_t flags
)
535 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
538 mp
->mnt_flag
&= ~lflags
;
542 /* Is the mount_t ronly and upgrade read/write requested? */
544 vfs_iswriteupgrade(mount_t mp
) /* ronly && MNTK_WANTRDWR */
546 return ((mp
->mnt_flag
& MNT_RDONLY
) && (mp
->mnt_kern_flag
& MNTK_WANTRDWR
));
550 /* Is the mount_t mounted ronly */
552 vfs_isrdonly(mount_t mp
)
554 return (mp
->mnt_flag
& MNT_RDONLY
);
557 /* Is the mount_t mounted for filesystem synchronous writes? */
559 vfs_issynchronous(mount_t mp
)
561 return (mp
->mnt_flag
& MNT_SYNCHRONOUS
);
564 /* Is the mount_t mounted read/write? */
566 vfs_isrdwr(mount_t mp
)
568 return ((mp
->mnt_flag
& MNT_RDONLY
) == 0);
572 /* Is mount_t marked for update (ie MNT_UPDATE) */
574 vfs_isupdate(mount_t mp
)
576 return (mp
->mnt_flag
& MNT_UPDATE
);
580 /* Is mount_t marked for reload (ie MNT_RELOAD) */
582 vfs_isreload(mount_t mp
)
584 return ((mp
->mnt_flag
& MNT_UPDATE
) && (mp
->mnt_flag
& MNT_RELOAD
));
587 /* Is mount_t marked for reload (ie MNT_FORCE) */
589 vfs_isforce(mount_t mp
)
591 if ((mp
->mnt_lflag
& MNT_LFORCE
) || (mp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
))
598 vfs_64bitready(mount_t mp
)
600 if ((mp
->mnt_vtable
->vfc_64bitready
))
608 vfs_authcache_ttl(mount_t mp
)
610 if ( (mp
->mnt_kern_flag
& (MNTK_AUTH_OPAQUE
| MNTK_AUTH_CACHE_TTL
)) )
611 return (mp
->mnt_authcache_ttl
);
613 return (CACHED_RIGHT_INFINITE_TTL
);
617 vfs_setauthcache_ttl(mount_t mp
, int ttl
)
620 mp
->mnt_kern_flag
|= MNTK_AUTH_CACHE_TTL
;
621 mp
->mnt_authcache_ttl
= ttl
;
626 vfs_clearauthcache_ttl(mount_t mp
)
629 mp
->mnt_kern_flag
&= ~MNTK_AUTH_CACHE_TTL
;
631 * back to the default TTL value in case
632 * MNTK_AUTH_OPAQUE is set on this mount
634 mp
->mnt_authcache_ttl
= CACHED_LOOKUP_RIGHT_TTL
;
639 vfs_markdependency(mount_t mp
)
641 proc_t p
= current_proc();
643 mp
->mnt_dependent_process
= p
;
644 mp
->mnt_dependent_pid
= proc_pid(p
);
650 vfs_authopaque(mount_t mp
)
652 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
))
659 vfs_authopaqueaccess(mount_t mp
)
661 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE_ACCESS
))
668 vfs_setauthopaque(mount_t mp
)
671 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE
;
676 vfs_setauthopaqueaccess(mount_t mp
)
679 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE_ACCESS
;
684 vfs_clearauthopaque(mount_t mp
)
687 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE
;
692 vfs_clearauthopaqueaccess(mount_t mp
)
695 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE_ACCESS
;
700 vfs_setextendedsecurity(mount_t mp
)
703 mp
->mnt_kern_flag
|= MNTK_EXTENDED_SECURITY
;
708 vfs_clearextendedsecurity(mount_t mp
)
711 mp
->mnt_kern_flag
&= ~MNTK_EXTENDED_SECURITY
;
716 vfs_extendedsecurity(mount_t mp
)
718 return(mp
->mnt_kern_flag
& MNTK_EXTENDED_SECURITY
);
721 /* returns the max size of short symlink in this mount_t */
723 vfs_maxsymlen(mount_t mp
)
725 return(mp
->mnt_maxsymlinklen
);
728 /* set max size of short symlink on mount_t */
730 vfs_setmaxsymlen(mount_t mp
, uint32_t symlen
)
732 mp
->mnt_maxsymlinklen
= symlen
;
735 /* return a pointer to the RO vfs_statfs associated with mount_t */
737 vfs_statfs(mount_t mp
)
739 return(&mp
->mnt_vfsstat
);
743 vfs_getattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
747 if ((error
= VFS_GETATTR(mp
, vfa
, ctx
)) != 0)
751 * If we have a filesystem create time, use it to default some others.
753 if (VFSATTR_IS_SUPPORTED(vfa
, f_create_time
)) {
754 if (VFSATTR_IS_ACTIVE(vfa
, f_modify_time
) && !VFSATTR_IS_SUPPORTED(vfa
, f_modify_time
))
755 VFSATTR_RETURN(vfa
, f_modify_time
, vfa
->f_create_time
);
762 vfs_setattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
766 if (vfs_isrdonly(mp
))
769 error
= VFS_SETATTR(mp
, vfa
, ctx
);
772 * If we had alternate ways of setting vfs attributes, we'd
779 /* return the private data handle stored in mount_t */
781 vfs_fsprivate(mount_t mp
)
783 return(mp
->mnt_data
);
786 /* set the private data handle in mount_t */
788 vfs_setfsprivate(mount_t mp
, void *mntdata
)
791 mp
->mnt_data
= mntdata
;
797 * return the block size of the underlying
798 * device associated with mount_t
801 vfs_devblocksize(mount_t mp
) {
803 return(mp
->mnt_devblocksize
);
808 * return the io attributes associated with mount_t
811 vfs_ioattr(mount_t mp
, struct vfsioattr
*ioattrp
)
814 ioattrp
->io_maxreadcnt
= MAXPHYS
;
815 ioattrp
->io_maxwritecnt
= MAXPHYS
;
816 ioattrp
->io_segreadcnt
= 32;
817 ioattrp
->io_segwritecnt
= 32;
818 ioattrp
->io_maxsegreadsize
= MAXPHYS
;
819 ioattrp
->io_maxsegwritesize
= MAXPHYS
;
820 ioattrp
->io_devblocksize
= DEV_BSIZE
;
821 ioattrp
->io_flags
= 0;
823 ioattrp
->io_maxreadcnt
= mp
->mnt_maxreadcnt
;
824 ioattrp
->io_maxwritecnt
= mp
->mnt_maxwritecnt
;
825 ioattrp
->io_segreadcnt
= mp
->mnt_segreadcnt
;
826 ioattrp
->io_segwritecnt
= mp
->mnt_segwritecnt
;
827 ioattrp
->io_maxsegreadsize
= mp
->mnt_maxsegreadsize
;
828 ioattrp
->io_maxsegwritesize
= mp
->mnt_maxsegwritesize
;
829 ioattrp
->io_devblocksize
= mp
->mnt_devblocksize
;
830 ioattrp
->io_flags
= mp
->mnt_ioflags
;
832 ioattrp
->io_reserved
[0] = NULL
;
833 ioattrp
->io_reserved
[1] = NULL
;
838 * set the IO attributes associated with mount_t
841 vfs_setioattr(mount_t mp
, struct vfsioattr
* ioattrp
)
845 mp
->mnt_maxreadcnt
= ioattrp
->io_maxreadcnt
;
846 mp
->mnt_maxwritecnt
= ioattrp
->io_maxwritecnt
;
847 mp
->mnt_segreadcnt
= ioattrp
->io_segreadcnt
;
848 mp
->mnt_segwritecnt
= ioattrp
->io_segwritecnt
;
849 mp
->mnt_maxsegreadsize
= ioattrp
->io_maxsegreadsize
;
850 mp
->mnt_maxsegwritesize
= ioattrp
->io_maxsegwritesize
;
851 mp
->mnt_devblocksize
= ioattrp
->io_devblocksize
;
852 mp
->mnt_ioflags
= ioattrp
->io_flags
;
856 * Add a new filesystem into the kernel specified in passed in
857 * vfstable structure. It fills in the vnode
858 * dispatch vector that is to be passed to when vnodes are created.
859 * It returns a handle which is to be used to when the FS is to be removed
861 typedef int (*PFI
)(void *);
862 extern int vfs_opv_numops
;
864 vfs_fsadd(struct vfs_fsentry
*vfe
, vfstable_t
* handle
)
867 struct vfstable
*newvfstbl
= NULL
;
869 int (***opv_desc_vector_p
)(void *);
870 int (**opv_desc_vector
)(void *);
871 struct vnodeopv_entry_desc
*opve_descp
;
877 * This routine is responsible for all the initialization that would
878 * ordinarily be done as part of the system startup;
881 if (vfe
== (struct vfs_fsentry
*)0)
884 desccount
= vfe
->vfe_vopcnt
;
885 if ((desccount
<=0) || ((desccount
> 5)) || (vfe
->vfe_vfsops
== (struct vfsops
*)NULL
)
886 || (vfe
->vfe_opvdescs
== (struct vnodeopv_desc
**)NULL
))
890 MALLOC(newvfstbl
, void *, sizeof(struct vfstable
), M_TEMP
,
892 bzero(newvfstbl
, sizeof(struct vfstable
));
893 newvfstbl
->vfc_vfsops
= vfe
->vfe_vfsops
;
894 strncpy(&newvfstbl
->vfc_name
[0], vfe
->vfe_fsname
, MFSNAMELEN
);
895 if ((vfe
->vfe_flags
& VFS_TBLNOTYPENUM
))
896 newvfstbl
->vfc_typenum
= maxvfsconf
++;
898 newvfstbl
->vfc_typenum
= vfe
->vfe_fstypenum
;
900 newvfstbl
->vfc_refcount
= 0;
901 newvfstbl
->vfc_flags
= 0;
902 newvfstbl
->vfc_mountroot
= NULL
;
903 newvfstbl
->vfc_next
= NULL
;
904 newvfstbl
->vfc_threadsafe
= 0;
905 newvfstbl
->vfc_vfsflags
= 0;
906 if (vfe
->vfe_flags
& VFS_TBL64BITREADY
)
907 newvfstbl
->vfc_64bitready
= 1;
908 if (vfe
->vfe_flags
& VFS_TBLTHREADSAFE
)
909 newvfstbl
->vfc_threadsafe
= 1;
910 if (vfe
->vfe_flags
& VFS_TBLFSNODELOCK
)
911 newvfstbl
->vfc_threadsafe
= 1;
912 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) == VFS_TBLLOCALVOL
)
913 newvfstbl
->vfc_flags
|= MNT_LOCAL
;
914 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) && (vfe
->vfe_flags
& VFS_TBLGENERICMNTARGS
) == 0)
915 newvfstbl
->vfc_vfsflags
|= VFC_VFSLOCALARGS
;
917 newvfstbl
->vfc_vfsflags
|= VFC_VFSGENERICARGS
;
919 if (vfe
->vfe_flags
& VFS_TBLNATIVEXATTR
)
920 newvfstbl
->vfc_vfsflags
|= VFC_VFSNATIVEXATTR
;
921 if (vfe
->vfe_flags
& VFS_TBLUNMOUNT_PREFLIGHT
)
922 newvfstbl
->vfc_vfsflags
|= VFC_VFSPREFLIGHT
;
923 if (vfe
->vfe_flags
& VFS_TBLREADDIR_EXTENDED
)
924 newvfstbl
->vfc_vfsflags
|= VFC_VFSREADDIR_EXTENDED
;
925 if (vfe
->vfe_flags
& VFS_TBLNOMACLABEL
)
926 newvfstbl
->vfc_vfsflags
|= VFC_VFSNOMACLABEL
;
929 * Allocate and init the vectors.
930 * Also handle backwards compatibility.
932 * We allocate one large block to hold all <desccount>
933 * vnode operation vectors stored contiguously.
935 /* XXX - shouldn't be M_TEMP */
937 descsize
= desccount
* vfs_opv_numops
* sizeof(PFI
);
938 MALLOC(descptr
, PFI
*, descsize
,
940 bzero(descptr
, descsize
);
942 newvfstbl
->vfc_descptr
= descptr
;
943 newvfstbl
->vfc_descsize
= descsize
;
946 for (i
= 0; i
< desccount
; i
++ ) {
947 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
949 * Fill in the caller's pointer to the start of the i'th vector.
950 * They'll need to supply it when calling vnode_create.
952 opv_desc_vector
= descptr
+ i
* vfs_opv_numops
;
953 *opv_desc_vector_p
= opv_desc_vector
;
955 for (j
= 0; vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
956 opve_descp
= &(vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
]);
959 * Sanity check: is this operation listed
960 * in the list of operations? We check this
961 * by seeing if its offset is zero. Since
962 * the default routine should always be listed
963 * first, it should be the only one with a zero
964 * offset. Any other operation with a zero
965 * offset is probably not listed in
966 * vfs_op_descs, and so is probably an error.
968 * A panic here means the layer programmer
969 * has committed the all-too common bug
970 * of adding a new operation to the layer's
971 * list of vnode operations but
972 * not adding the operation to the system-wide
973 * list of supported operations.
975 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
976 opve_descp
->opve_op
->vdesc_offset
!= VOFFSET(vnop_default
)) {
977 printf("vfs_fsadd: operation %s not listed in %s.\n",
978 opve_descp
->opve_op
->vdesc_name
,
980 panic("vfs_fsadd: bad operation");
983 * Fill in this entry.
985 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
986 opve_descp
->opve_impl
;
991 * Finally, go back and replace unfilled routines
992 * with their default. (Sigh, an O(n^3) algorithm. I
993 * could make it better, but that'd be work, and n is small.)
995 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
998 * Force every operations vector to have a default routine.
1000 opv_desc_vector
= *opv_desc_vector_p
;
1001 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
)
1002 panic("vfs_fsadd: operation vector without default routine.");
1003 for (j
= 0; j
< vfs_opv_numops
; j
++)
1004 if (opv_desc_vector
[j
] == NULL
)
1005 opv_desc_vector
[j
] =
1006 opv_desc_vector
[VOFFSET(vnop_default
)];
1008 } /* end of each vnodeopv_desc parsing */
1012 *handle
= vfstable_add(newvfstbl
);
1014 if (newvfstbl
->vfc_typenum
<= maxvfsconf
)
1015 maxvfsconf
= newvfstbl
->vfc_typenum
+ 1;
1018 if (newvfstbl
->vfc_vfsops
->vfs_init
)
1019 (*newvfstbl
->vfc_vfsops
->vfs_init
)((struct vfsconf
*)handle
);
1021 FREE(newvfstbl
, M_TEMP
);
1027 * Removes the filesystem from kernel.
1028 * The argument passed in is the handle that was given when
1029 * file system was added
1032 vfs_fsremove(vfstable_t handle
)
1034 struct vfstable
* vfstbl
= (struct vfstable
*)handle
;
1035 void *old_desc
= NULL
;
1038 /* Preflight check for any mounts */
1040 if ( vfstbl
->vfc_refcount
!= 0 ) {
1041 mount_list_unlock();
1044 mount_list_unlock();
1047 * save the old descriptor; the free cannot occur unconditionally,
1048 * since vfstable_del() may fail.
1050 if (vfstbl
->vfc_descptr
&& vfstbl
->vfc_descsize
) {
1051 old_desc
= vfstbl
->vfc_descptr
;
1053 err
= vfstable_del(vfstbl
);
1055 /* free the descriptor if the delete was successful */
1056 if (err
== 0 && old_desc
) {
1057 FREE(old_desc
, M_TEMP
);
1064 * This returns a reference to mount_t
1065 * which should be dropped using vfs_mountrele().
1066 * Not doing so will leak a mountpoint
1067 * and associated data structures.
1070 vfs_mountref(__unused mount_t mp
) /* gives a reference */
1075 /* This drops the reference on mount_t that was acquired */
1077 vfs_mountrele(__unused mount_t mp
) /* drops reference */
1083 vfs_context_pid(vfs_context_t ctx
)
1085 return (proc_pid(vfs_context_proc(ctx
)));
1089 vfs_context_suser(vfs_context_t ctx
)
1091 return (suser(ctx
->vc_ucred
, NULL
));
1095 * XXX Signals should be tied to threads, not processes, for most uses of this
1099 vfs_context_issignal(vfs_context_t ctx
, sigset_t mask
)
1101 proc_t p
= vfs_context_proc(ctx
);
1103 return(proc_pendingsignals(p
, mask
));
1108 vfs_context_is64bit(vfs_context_t ctx
)
1110 proc_t proc
= vfs_context_proc(ctx
);
1113 return(proc_is64bit(proc
));
1121 * Description: Given a vfs_context_t, return the proc_t associated with it.
1123 * Parameters: vfs_context_t The context to use
1125 * Returns: proc_t The process for this context
1127 * Notes: This function will return the current_proc() if any of the
1128 * following conditions are true:
1130 * o The supplied context pointer is NULL
1131 * o There is no Mach thread associated with the context
1132 * o There is no Mach task associated with the Mach thread
1133 * o There is no proc_t associated with the Mach task
1134 * o The proc_t has no per process open file table
1135 * o The proc_t is post-vfork()
1137 * This causes this function to return a value matching as
1138 * closely as possible the previous behaviour, while at the
1139 * same time avoiding the task lending that results from vfork()
1142 vfs_context_proc(vfs_context_t ctx
)
1146 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1147 proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
);
1148 if (proc
!= NULL
&& (proc
->p_fd
== NULL
|| (proc
->p_lflag
& P_LVFORK
)))
1151 return(proc
== NULL
? current_proc() : proc
);
1155 * vfs_context_get_special_port
1157 * Description: Return the requested special port from the task associated
1158 * with the given context.
1160 * Parameters: vfs_context_t The context to use
1161 * int Index of special port
1162 * ipc_port_t * Pointer to returned port
1164 * Returns: kern_return_t see task_get_special_port()
1167 vfs_context_get_special_port(vfs_context_t ctx
, int which
, ipc_port_t
*portp
)
1171 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1172 task
= get_threadtask(ctx
->vc_thread
);
1174 return task_get_special_port(task
, which
, portp
);
1178 * vfs_context_set_special_port
1180 * Description: Set the requested special port in the task associated
1181 * with the given context.
1183 * Parameters: vfs_context_t The context to use
1184 * int Index of special port
1185 * ipc_port_t New special port
1187 * Returns: kern_return_t see task_set_special_port()
1190 vfs_context_set_special_port(vfs_context_t ctx
, int which
, ipc_port_t port
)
1194 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1195 task
= get_threadtask(ctx
->vc_thread
);
1197 return task_set_special_port(task
, which
, port
);
1201 * vfs_context_thread
1203 * Description: Return the Mach thread associated with a vfs_context_t
1205 * Parameters: vfs_context_t The context to use
1207 * Returns: thread_t The thread for this context, or
1208 * NULL, if there is not one.
1210 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1211 * as a result of a static vfs_context_t declaration in a function
1212 * and will result in this function returning NULL.
1214 * This is intentional; this function should NOT return the
1215 * current_thread() in this case.
1218 vfs_context_thread(vfs_context_t ctx
)
1220 return(ctx
->vc_thread
);
1227 * Description: Returns a reference on the vnode for the current working
1228 * directory for the supplied context
1230 * Parameters: vfs_context_t The context to use
1232 * Returns: vnode_t The current working directory
1235 * Notes: The function first attempts to obtain the current directory
1236 * from the thread, and if it is not present there, falls back
1237 * to obtaining it from the process instead. If it can't be
1238 * obtained from either place, we return NULLVP.
1241 vfs_context_cwd(vfs_context_t ctx
)
1243 vnode_t cwd
= NULLVP
;
1245 if(ctx
!= NULL
&& ctx
->vc_thread
!= NULL
) {
1246 uthread_t uth
= get_bsdthread_info(ctx
->vc_thread
);
1250 * Get the cwd from the thread; if there isn't one, get it
1251 * from the process, instead.
1253 if ((cwd
= uth
->uu_cdir
) == NULLVP
&&
1254 (proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
)) != NULL
&&
1256 cwd
= proc
->p_fd
->fd_cdir
;
1264 vfs_context_create(vfs_context_t ctx
)
1266 vfs_context_t newcontext
;
1268 newcontext
= (vfs_context_t
)kalloc(sizeof(struct vfs_context
));
1271 kauth_cred_t safecred
;
1273 newcontext
->vc_thread
= ctx
->vc_thread
;
1274 safecred
= ctx
->vc_ucred
;
1276 newcontext
->vc_thread
= current_thread();
1277 safecred
= kauth_cred_get();
1279 if (IS_VALID_CRED(safecred
))
1280 kauth_cred_ref(safecred
);
1281 newcontext
->vc_ucred
= safecred
;
1289 vfs_context_current(void)
1291 vfs_context_t ctx
= NULL
;
1292 volatile uthread_t ut
= (uthread_t
)get_bsdthread_info(current_thread());
1295 if (ut
->uu_context
.vc_ucred
!= NULL
) {
1296 ctx
= &ut
->uu_context
;
1300 return(ctx
== NULL
? vfs_context_kernel() : ctx
);
1307 * Dangerous hack - adopt the first kernel thread as the current thread, to
1308 * get to the vfs_context_t in the uthread associated with a kernel thread.
1309 * This is used by UDF to make the call into IOCDMediaBSDClient,
1310 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1311 * ioctl() is being called from kernel or user space (and all this because
1312 * we do not pass threads into our ioctl()'s, instead of processes).
1314 * This is also used by imageboot_setup(), called early from bsd_init() after
1315 * kernproc has been given a credential.
1317 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1318 * of many Mach headers to do the reference directly rather than indirectly;
1319 * we will need to forego this convenience when we retire proc_thread().
1321 static struct vfs_context kerncontext
;
1323 vfs_context_kernel(void)
1325 if (kerncontext
.vc_ucred
== NOCRED
)
1326 kerncontext
.vc_ucred
= kernproc
->p_ucred
;
1327 if (kerncontext
.vc_thread
== NULL
)
1328 kerncontext
.vc_thread
= proc_thread(kernproc
);
1330 return(&kerncontext
);
1335 vfs_context_rele(vfs_context_t ctx
)
1338 if (IS_VALID_CRED(ctx
->vc_ucred
))
1339 kauth_cred_unref(&ctx
->vc_ucred
);
1340 kfree(ctx
, sizeof(struct vfs_context
));
1347 vfs_context_ucred(vfs_context_t ctx
)
1349 return (ctx
->vc_ucred
);
1353 * Return true if the context is owned by the superuser.
1356 vfs_context_issuser(vfs_context_t ctx
)
1358 return(kauth_cred_issuser(vfs_context_ucred(ctx
)));
1362 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1366 * Convert between vnode types and inode formats (since POSIX.1
1367 * defines mode word of stat structure in terms of inode formats).
1370 vnode_iftovt(int mode
)
1372 return(iftovt_tab
[((mode
) & S_IFMT
) >> 12]);
1376 vnode_vttoif(enum vtype indx
)
1378 return(vttoif_tab
[(int)(indx
)]);
1382 vnode_makeimode(int indx
, int mode
)
1384 return (int)(VTTOIF(indx
) | (mode
));
1389 * vnode manipulation functions.
1392 /* returns system root vnode reference; It should be dropped using vrele() */
1398 error
= vnode_get(rootvnode
);
1400 return ((vnode_t
)0);
1407 vnode_vid(vnode_t vp
)
1409 return ((uint32_t)(vp
->v_id
));
1412 /* returns a mount reference; drop it with vfs_mountrelease() */
1414 vnode_mount(vnode_t vp
)
1416 return (vp
->v_mount
);
/*
 * If vp is a directory with a filesystem mounted on it, return that
 * mount; otherwise return NULL.  The mnt_vnodecovered check guards
 * against a stale v_mountedhere pointer.  Returns a mount reference.
 */
mount_t
vnode_mountedhere(vnode_t vp)
{
	mount_t mp;

	if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
	    (mp->mnt_vnodecovered == vp))
		return (mp);
	else
		return (mount_t)NULL;
}
1432 /* returns vnode type of vnode_t */
1434 vnode_vtype(vnode_t vp
)
1436 return (vp
->v_type
);
1439 /* returns FS specific node saved in vnode */
1441 vnode_fsnode(vnode_t vp
)
1443 return (vp
->v_data
);
1447 vnode_clearfsnode(vnode_t vp
)
1453 vnode_specrdev(vnode_t vp
)
1459 /* Accessor functions */
1460 /* is vnode_t a root vnode */
1462 vnode_isvroot(vnode_t vp
)
1464 return ((vp
->v_flag
& VROOT
)? 1 : 0);
1467 /* is vnode_t a system vnode */
1469 vnode_issystem(vnode_t vp
)
1471 return ((vp
->v_flag
& VSYSTEM
)? 1 : 0);
1474 /* is vnode_t a swap file vnode */
1476 vnode_isswap(vnode_t vp
)
1478 return ((vp
->v_flag
& VSWAP
)? 1 : 0);
1481 /* if vnode_t mount operation in progress */
1483 vnode_ismount(vnode_t vp
)
1485 return ((vp
->v_flag
& VMOUNT
)? 1 : 0);
1488 /* is this vnode being recycled now */
/*
 * Nonzero if vp is currently being terminated or is already dead.
 * Taken under the vnode spinlock so the v_lflag read is consistent.
 */
int
vnode_isrecycled(vnode_t vp)
{
	int ret;

	vnode_lock_spin(vp);
	ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) ? 1 : 0;
	vnode_unlock(vp);
	return (ret);
}
1500 /* is vnode_t marked to not keep data cached once it's been consumed */
1502 vnode_isnocache(vnode_t vp
)
1504 return ((vp
->v_flag
& VNOCACHE_DATA
)? 1 : 0);
1508 * has sequential readahead been disabled on this vnode
1511 vnode_isnoreadahead(vnode_t vp
)
1513 return ((vp
->v_flag
& VRAOFF
)? 1 : 0);
1517 vnode_is_openevt(vnode_t vp
)
1519 return ((vp
->v_flag
& VOPENEVT
)? 1 : 0);
1522 /* is vnode_t a standard one? */
1524 vnode_isstandard(vnode_t vp
)
1526 return ((vp
->v_flag
& VSTANDARD
)? 1 : 0);
1529 /* don't vflush() if SKIPSYSTEM */
1531 vnode_isnoflush(vnode_t vp
)
1533 return ((vp
->v_flag
& VNOFLUSH
)? 1 : 0);
1536 /* is vnode_t a regular file */
1538 vnode_isreg(vnode_t vp
)
1540 return ((vp
->v_type
== VREG
)? 1 : 0);
1543 /* is vnode_t a directory? */
1545 vnode_isdir(vnode_t vp
)
1547 return ((vp
->v_type
== VDIR
)? 1 : 0);
1550 /* is vnode_t a symbolic link ? */
1552 vnode_islnk(vnode_t vp
)
1554 return ((vp
->v_type
== VLNK
)? 1 : 0);
1557 /* is vnode_t a fifo ? */
1559 vnode_isfifo(vnode_t vp
)
1561 return ((vp
->v_type
== VFIFO
)? 1 : 0);
1564 /* is vnode_t a block device? */
1566 vnode_isblk(vnode_t vp
)
1568 return ((vp
->v_type
== VBLK
)? 1 : 0);
1571 /* is vnode_t a char device? */
1573 vnode_ischr(vnode_t vp
)
1575 return ((vp
->v_type
== VCHR
)? 1 : 0);
1578 /* is vnode_t a socket? */
1580 vnode_issock(vnode_t vp
)
1582 return ((vp
->v_type
== VSOCK
)? 1 : 0);
1585 /* is vnode_t a named stream? */
1587 vnode_isnamedstream(
1596 return ((vp
->v_flag
& VISNAMEDSTREAM
) ? 1 : 0);
1612 return ((vp
->v_flag
& VISSHADOW
) ? 1 : 0);
/*
 * Mark vp so that data is not kept cached once it has been consumed;
 * used for quota files.  Flag update is done under the vnode spinlock.
 */
void
vnode_setnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}
/* Clear the no-cache marking set by vnode_setnocache(). */
void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}
/* Enable open-event notification on vp (set VOPENEVT under the spinlock). */
void
vnode_set_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VOPENEVT;
	vnode_unlock(vp);
}
/* Disable open-event notification on vp (clear VOPENEVT under the spinlock). */
void
vnode_clear_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VOPENEVT;
	vnode_unlock(vp);
}
/* Disable sequential readahead on vp (set VRAOFF under the spinlock). */
void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}
/* Re-enable sequential readahead on vp (clear VRAOFF under the spinlock). */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}
1669 /* mark vnode_t so that vflush() skips it if SKIPSYSTEM is requested */
/* Mark vp so that vflush() with SKIPSYSTEM skips it (set VNOFLUSH). */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}
/* Clear the VNOFLUSH marking set by vnode_setnoflush(). */
void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
1687 /* is vnode_t a blkdevice and has a FS mounted on it */
1689 vnode_ismountedon(vnode_t vp
)
1691 return ((vp
->v_specflags
& SI_MOUNTEDON
)? 1 : 0);
/* Record that a filesystem is mounted on this (block-device) vnode. */
void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}
/* Clear the mounted-on marking set by vnode_setmountedon(). */
void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
1712 vnode_settag(vnode_t vp
, int tag
)
1719 vnode_tag(vnode_t vp
)
1725 vnode_parent(vnode_t vp
)
1728 return(vp
->v_parent
);
1732 vnode_setparent(vnode_t vp
, vnode_t dvp
)
1738 vnode_name(vnode_t vp
)
1740 /* we try to keep v_name a reasonable name for the node */
1745 vnode_setname(vnode_t vp
, char * name
)
1750 /* return the registered FS name when adding the FS to kernel */
1752 vnode_vfsname(vnode_t vp
, char * buf
)
1754 strncpy(buf
, vp
->v_mount
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
1757 /* return the FS type number */
1759 vnode_vfstypenum(vnode_t vp
)
1761 return(vp
->v_mount
->mnt_vtable
->vfc_typenum
);
1765 vnode_vfs64bitready(vnode_t vp
)
1768 if ((vp
->v_mount
->mnt_vtable
->vfc_64bitready
))
1776 /* return the visible flags on associated mount point of vnode_t */
1778 vnode_vfsvisflags(vnode_t vp
)
1780 return(vp
->v_mount
->mnt_flag
& MNT_VISFLAGMASK
);
1783 /* return the command modifier flags on associated mount point of vnode_t */
1785 vnode_vfscmdflags(vnode_t vp
)
1787 return(vp
->v_mount
->mnt_flag
& MNT_CMDFLAGS
);
1790 /* return the max symlink of short links of vnode_t */
1792 vnode_vfsmaxsymlen(vnode_t vp
)
1794 return(vp
->v_mount
->mnt_maxsymlinklen
);
1797 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1799 vnode_vfsstatfs(vnode_t vp
)
1801 return(&vp
->v_mount
->mnt_vfsstat
);
1804 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1806 vnode_vfsfsprivate(vnode_t vp
)
1808 return(vp
->v_mount
->mnt_data
);
1811 /* is vnode_t in a rdonly mounted FS */
1813 vnode_vfsisrdonly(vnode_t vp
)
1815 return ((vp
->v_mount
->mnt_flag
& MNT_RDONLY
)? 1 : 0);
/*
 * Return a vnode reference to the current working directory; a
 * per-thread cwd, if in effect, takes precedence over the per-process
 * one inside vfs_context_cwd().
 *
 * XXX Published, but not used.
 */
vnode_t
current_workingdir(void)
{
	return vfs_context_cwd(vfs_context_current());
}
1831 /* returns vnode ref to current root(chroot) directory */
1833 current_rootdir(void)
1835 proc_t proc
= current_proc();
1838 if ( (vp
= proc
->p_fd
->fd_rdir
) ) {
1839 if ( (vnode_getwithref(vp
)) )
1846 * Get a filesec and optional acl contents from an extended attribute.
1847 * Function will attempt to retrieve ACL, UUID, and GUID information using a
1848 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1850 * Parameters: vp The vnode on which to operate.
1851 * fsecp The filesec (and ACL, if any) being
1853 * ctx The vnode context in which the
1854 * operation is to be attempted.
1856 * Returns: 0 Success
1859 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1860 * host byte order, as will be the ACL contents, if any.
1861 * Internally, we will canonicalize these values from network (PPC)
1862 * byte order after we retrieve them so that the on-disk contents
1863 * of the extended attribute are identical for both PPC and Intel
1864 * (if we were not being required to provide this service via
1865 * fallback, this would be the job of the filesystem
1866 * 'VNOP_GETATTR' call).
1868 * We use ntohl() because it has a transitive property on Intel
1869 * machines and no effect on PPC machines. This guarantees us
1871 * XXX: Deleting rather than ignoring a corrupt security structure is
1872 * probably the only way to reset it without assistance from an
1873 * file system integrity checking tool. Right now we ignore it.
1875 * XXX: We should enumerate the possible errno values here, and where
1876 * in the code they originated.
1879 vnode_get_filesec(vnode_t vp
, kauth_filesec_t
*fsecp
, vfs_context_t ctx
)
1881 kauth_filesec_t fsec
;
1884 size_t xsize
, rsize
;
1886 uint32_t host_fsec_magic
;
1887 uint32_t host_acl_entrycount
;
1893 /* find out how big the EA is */
1894 if (vn_getxattr(vp
, KAUTH_FILESEC_XATTR
, NULL
, &xsize
, XATTR_NOSECURITY
, ctx
) != 0) {
1895 /* no EA, no filesec */
1896 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1898 /* either way, we are done */
1903 * To be valid, a kauth_filesec_t must be large enough to hold a zero
1904 * ACE entry ACL, and if it's larger than that, it must have the right
1905 * number of bytes such that it contains an atomic number of ACEs,
1906 * rather than partial entries. Otherwise, we ignore it.
1908 if (!KAUTH_FILESEC_VALID(xsize
)) {
1909 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize
);
1914 /* how many entries would fit? */
1915 fsec_size
= KAUTH_FILESEC_COUNT(xsize
);
1917 /* get buffer and uio */
1918 if (((fsec
= kauth_filesec_alloc(fsec_size
)) == NULL
) ||
1919 ((fsec_uio
= uio_create(1, 0, UIO_SYSSPACE
, UIO_READ
)) == NULL
) ||
1920 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), xsize
)) {
1921 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1926 /* read security attribute */
1928 if ((error
= vn_getxattr(vp
,
1929 KAUTH_FILESEC_XATTR
,
1935 /* no attribute - no security data */
1936 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1938 /* either way, we are done */
1943 * Validate security structure; the validation must take place in host
1944 * byte order. If it's corrupt, we will just ignore it.
1947 /* Validate the size before trying to convert it */
1948 if (rsize
< KAUTH_FILESEC_SIZE(0)) {
1949 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize
);
1953 /* Validate the magic number before trying to convert it */
1954 host_fsec_magic
= ntohl(KAUTH_FILESEC_MAGIC
);
1955 if (fsec
->fsec_magic
!= host_fsec_magic
) {
1956 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic
);
1960 /* Validate the entry count before trying to convert it. */
1961 host_acl_entrycount
= ntohl(fsec
->fsec_acl
.acl_entrycount
);
1962 if (host_acl_entrycount
!= KAUTH_FILESEC_NOACL
) {
1963 if (host_acl_entrycount
> KAUTH_ACL_MAX_ENTRIES
) {
1964 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount
);
1967 if (KAUTH_FILESEC_SIZE(host_acl_entrycount
) > rsize
) {
1968 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount
, rsize
);
1973 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, NULL
);
1980 kauth_filesec_free(fsec
);
1981 if (fsec_uio
!= NULL
)
1989 * Set a filesec and optional acl contents into an extended attribute.
1990 * function will attempt to store ACL, UUID, and GUID information using a
1991 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
1992 * may or may not point to the `fsec->fsec_acl`, depending on whether the
1993 * original caller supplied an acl.
1995 * Parameters: vp The vnode on which to operate.
1996 * fsec The filesec being set.
1997 * acl The acl to be associated with 'fsec'.
1998 * ctx The vnode context in which the
1999 * operation is to be attempted.
2001 * Returns: 0 Success
2004 * Notes: Both the fsec and the acl are always valid.
2006 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2007 * as are the acl contents, if they are used. Internally, we will
2008 * cannonize these values into network (PPC) byte order before we
2009 * attempt to write them so that the on-disk contents of the
2010 * extended attribute are identical for both PPC and Intel (if we
2011 * were not being required to provide this service via fallback,
2012 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2013 * We reverse this process on the way out, so we leave with the
2014 * same byte order we started with.
2016 * XXX: We should enummerate the possible errno values here, and where
2017 * in the code they originated.
2020 vnode_set_filesec(vnode_t vp
, kauth_filesec_t fsec
, kauth_acl_t acl
, vfs_context_t ctx
)
2024 uint32_t saved_acl_copysize
;
2028 if ((fsec_uio
= uio_create(2, 0, UIO_SYSSPACE
, UIO_WRITE
)) == NULL
) {
2029 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2034 * Save the pre-converted ACL copysize, because it gets swapped too
2035 * if we are running with the wrong endianness.
2037 saved_acl_copysize
= KAUTH_ACL_COPYSIZE(acl
);
2039 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK
, fsec
, acl
);
2041 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), sizeof(struct kauth_filesec
) - sizeof(struct kauth_acl
));
2042 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(acl
), saved_acl_copysize
);
2043 error
= vn_setxattr(vp
,
2044 KAUTH_FILESEC_XATTR
,
2046 XATTR_NOSECURITY
, /* we have auth'ed already */
2048 VFS_DEBUG(ctx
, vp
, "SETATTR - set ACL returning %d", error
);
2050 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, acl
);
2053 if (fsec_uio
!= NULL
)
2060 * Returns: 0 Success
2061 * ENOMEM Not enough space [only if has filesec]
2063 * vnode_get_filesec: ???
2064 * kauth_cred_guid2uid: ???
2065 * kauth_cred_guid2gid: ???
2066 * vfs_update_vfsstat: ???
2069 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2071 kauth_filesec_t fsec
;
2077 /* don't ask for extended security data if the filesystem doesn't support it */
2078 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
2079 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
2080 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
2081 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
2085 * If the caller wants size values we might have to synthesise, give the
2086 * filesystem the opportunity to supply better intermediate results.
2088 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2089 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2090 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2091 VATTR_SET_ACTIVE(vap
, va_data_size
);
2092 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
2093 VATTR_SET_ACTIVE(vap
, va_total_size
);
2094 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
2097 error
= VNOP_GETATTR(vp
, vap
, ctx
);
2099 KAUTH_DEBUG("ERROR - returning %d", error
);
2104 * If extended security data was requested but not returned, try the fallback
2107 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2110 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
2111 /* try to get the filesec */
2112 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
2115 /* if no filesec, no attributes */
2117 VATTR_RETURN(vap
, va_acl
, NULL
);
2118 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
2119 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
2122 /* looks good, try to return what we were asked for */
2123 VATTR_RETURN(vap
, va_uuuid
, fsec
->fsec_owner
);
2124 VATTR_RETURN(vap
, va_guuid
, fsec
->fsec_group
);
2126 /* only return the ACL if we were actually asked for it */
2127 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2128 if (fsec
->fsec_acl
.acl_entrycount
== KAUTH_FILESEC_NOACL
) {
2129 VATTR_RETURN(vap
, va_acl
, NULL
);
2131 facl
= kauth_acl_alloc(fsec
->fsec_acl
.acl_entrycount
);
2133 kauth_filesec_free(fsec
);
2137 bcopy(&fsec
->fsec_acl
, facl
, KAUTH_ACL_COPYSIZE(&fsec
->fsec_acl
));
2138 VATTR_RETURN(vap
, va_acl
, facl
);
2141 kauth_filesec_free(fsec
);
2145 * If someone gave us an unsolicited filesec, toss it. We promise that
2146 * we're OK with a filesystem giving us anything back, but our callers
2147 * only expect what they asked for.
2149 if (VATTR_IS_SUPPORTED(vap
, va_acl
) && !VATTR_IS_ACTIVE(vap
, va_acl
)) {
2150 if (vap
->va_acl
!= NULL
)
2151 kauth_acl_free(vap
->va_acl
);
2152 VATTR_CLEAR_SUPPORTED(vap
, va_acl
);
2155 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2157 * Handle the case where we need a UID/GID, but only have extended
2158 * security information.
2160 if (VATTR_NOT_RETURNED(vap
, va_uid
) &&
2161 VATTR_IS_SUPPORTED(vap
, va_uuuid
) &&
2162 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
2163 if ((error
= kauth_cred_guid2uid(&vap
->va_uuuid
, &nuid
)) == 0)
2164 VATTR_RETURN(vap
, va_uid
, nuid
);
2166 if (VATTR_NOT_RETURNED(vap
, va_gid
) &&
2167 VATTR_IS_SUPPORTED(vap
, va_guuid
) &&
2168 !kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
2169 if ((error
= kauth_cred_guid2gid(&vap
->va_guuid
, &ngid
)) == 0)
2170 VATTR_RETURN(vap
, va_gid
, ngid
);
2175 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2177 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2178 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2180 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2181 nuid
= vp
->v_mount
->mnt_fsowner
;
2182 if (nuid
== KAUTH_UID_NONE
)
2184 } else if (VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2187 /* this will always be something sensible */
2188 nuid
= vp
->v_mount
->mnt_fsowner
;
2190 if ((nuid
== 99) && !vfs_context_issuser(ctx
))
2191 nuid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
2192 VATTR_RETURN(vap
, va_uid
, nuid
);
2194 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2195 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2197 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2198 ngid
= vp
->v_mount
->mnt_fsgroup
;
2199 if (ngid
== KAUTH_GID_NONE
)
2201 } else if (VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2204 /* this will always be something sensible */
2205 ngid
= vp
->v_mount
->mnt_fsgroup
;
2207 if ((ngid
== 99) && !vfs_context_issuser(ctx
))
2208 ngid
= kauth_cred_getgid(vfs_context_ucred(ctx
));
2209 VATTR_RETURN(vap
, va_gid
, ngid
);
2213 * Synthesise some values that can be reasonably guessed.
2215 if (!VATTR_IS_SUPPORTED(vap
, va_iosize
))
2216 VATTR_RETURN(vap
, va_iosize
, vp
->v_mount
->mnt_vfsstat
.f_iosize
);
2218 if (!VATTR_IS_SUPPORTED(vap
, va_flags
))
2219 VATTR_RETURN(vap
, va_flags
, 0);
2221 if (!VATTR_IS_SUPPORTED(vap
, va_filerev
))
2222 VATTR_RETURN(vap
, va_filerev
, 0);
2224 if (!VATTR_IS_SUPPORTED(vap
, va_gen
))
2225 VATTR_RETURN(vap
, va_gen
, 0);
2228 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2230 if (!VATTR_IS_SUPPORTED(vap
, va_data_size
))
2231 VATTR_RETURN(vap
, va_data_size
, 0);
2233 /* do we want any of the possibly-computed values? */
2234 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2235 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2236 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2237 /* make sure f_bsize is valid */
2238 if (vp
->v_mount
->mnt_vfsstat
.f_bsize
== 0) {
2239 if ((error
= vfs_update_vfsstat(vp
->v_mount
, ctx
, VFS_KERNEL_EVENT
)) != 0)
2243 /* default va_data_alloc from va_data_size */
2244 if (!VATTR_IS_SUPPORTED(vap
, va_data_alloc
))
2245 VATTR_RETURN(vap
, va_data_alloc
, roundup(vap
->va_data_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2247 /* default va_total_size from va_data_size */
2248 if (!VATTR_IS_SUPPORTED(vap
, va_total_size
))
2249 VATTR_RETURN(vap
, va_total_size
, vap
->va_data_size
);
2251 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2252 if (!VATTR_IS_SUPPORTED(vap
, va_total_alloc
))
2253 VATTR_RETURN(vap
, va_total_alloc
, roundup(vap
->va_total_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2257 * If we don't have a change time, pull it from the modtime.
2259 if (!VATTR_IS_SUPPORTED(vap
, va_change_time
) && VATTR_IS_SUPPORTED(vap
, va_modify_time
))
2260 VATTR_RETURN(vap
, va_change_time
, vap
->va_modify_time
);
2263 * This is really only supported for the creation VNOPs, but since the field is there
2264 * we should populate it correctly.
2266 VATTR_RETURN(vap
, va_type
, vp
->v_type
);
2269 * The fsid can be obtained from the mountpoint directly.
2271 VATTR_RETURN(vap
, va_fsid
, vp
->v_mount
->mnt_vfsstat
.f_fsid
.val
[0]);
2279 * Set the attributes on a vnode in a vnode context.
2281 * Parameters: vp The vnode whose attributes to set.
2282 * vap A pointer to the attributes to set.
2283 * ctx The vnode context in which the
2284 * operation is to be attempted.
2286 * Returns: 0 Success
2289 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2291 * The contents of the data area pointed to by 'vap' may be
2292 * modified if the vnode is on a filesystem which has been
2293 * mounted with ignore-ownership flags, or by the underlying
2294 * VFS itself, or by the fallback code, if the underlying VFS
2295 * does not support ACL, UUID, or GUUID attributes directly.
2297 * XXX: We should enummerate the possible errno values here, and where
2298 * in the code they originated.
2301 vnode_setattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2303 int error
, is_perm_change
=0;
2306 * Make sure the filesystem is mounted R/W.
2307 * If not, return an error.
2309 if (vfs_isrdonly(vp
->v_mount
)) {
2314 /* For streams, va_data_size is the only settable attribute. */
2315 if ((vp
->v_flag
& VISNAMEDSTREAM
) && (vap
->va_active
!= VNODE_ATTR_va_data_size
)) {
2322 * If ownership is being ignored on this volume, we silently discard
2323 * ownership changes.
2325 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2326 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
2327 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
2330 if ( VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
)
2331 || VATTR_IS_ACTIVE(vap
, va_mode
) || VATTR_IS_ACTIVE(vap
, va_acl
)) {
2336 * Make sure that extended security is enabled if we're going to try
2339 if (!vfs_extendedsecurity(vnode_mount(vp
)) &&
2340 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
2341 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2346 error
= VNOP_SETATTR(vp
, vap
, ctx
);
2348 if ((error
== 0) && !VATTR_ALL_SUPPORTED(vap
))
2349 error
= vnode_setattr_fallback(vp
, vap
, ctx
);
2352 // only send a stat_changed event if this is more than
2353 // just an access time update
2354 if (error
== 0 && (vap
->va_active
!= VNODE_ATTR_BIT(va_access_time
))) {
2355 if (is_perm_change
) {
2356 if (need_fsevent(FSE_CHOWN
, vp
)) {
2357 add_fsevent(FSE_CHOWN
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2359 } else if(need_fsevent(FSE_STAT_CHANGED
, vp
)) {
2360 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2370 * Fallback for setting the attributes on a vnode in a vnode context. This
2371 * Function will attempt to store ACL, UUID, and GUID information utilizing
2372 * a read/modify/write operation against an EA used as a backing store for
2375 * Parameters: vp The vnode whose attributes to set.
2376 * vap A pointer to the attributes to set.
2377 * ctx The vnode context in which the
2378 * operation is to be attempted.
2380 * Returns: 0 Success
2383 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2384 * as are the fsec and lfsec, if they are used.
2386 * The contents of the data area pointed to by 'vap' may be
2387 * modified to indicate that the attribute is supported for
2388 * any given requested attribute.
2390 * XXX: We should enummerate the possible errno values here, and where
2391 * in the code they originated.
2394 vnode_setattr_fallback(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2396 kauth_filesec_t fsec
;
2398 struct kauth_filesec lfsec
;
2404 * Extended security fallback via extended attributes.
2406 * Note that we do not free the filesec; the caller is expected to
2409 if (VATTR_NOT_RETURNED(vap
, va_acl
) ||
2410 VATTR_NOT_RETURNED(vap
, va_uuuid
) ||
2411 VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2412 VFS_DEBUG(ctx
, vp
, "SETATTR - doing filesec fallback");
2415 * Fail for file types that we don't permit extended security
2418 if ((vp
->v_type
!= VDIR
) && (vp
->v_type
!= VLNK
) && (vp
->v_type
!= VREG
)) {
2419 VFS_DEBUG(ctx
, vp
, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp
));
2425 * If we don't have all the extended security items, we need
2426 * to fetch the existing data to perform a read-modify-write
2430 if (!VATTR_IS_ACTIVE(vap
, va_acl
) ||
2431 !VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
2432 !VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2433 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0) {
2434 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error
);
2438 /* if we didn't get a filesec, use our local one */
2440 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2443 KAUTH_DEBUG("SETATTR - updating existing filesec");
2446 facl
= &fsec
->fsec_acl
;
2448 /* if we're using the local filesec, we need to initialise it */
2449 if (fsec
== &lfsec
) {
2450 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
2451 fsec
->fsec_owner
= kauth_null_guid
;
2452 fsec
->fsec_group
= kauth_null_guid
;
2453 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2454 facl
->acl_flags
= 0;
2458 * Update with the supplied attributes.
2460 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
2461 KAUTH_DEBUG("SETATTR - updating owner UUID");
2462 fsec
->fsec_owner
= vap
->va_uuuid
;
2463 VATTR_SET_SUPPORTED(vap
, va_uuuid
);
2465 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2466 KAUTH_DEBUG("SETATTR - updating group UUID");
2467 fsec
->fsec_group
= vap
->va_guuid
;
2468 VATTR_SET_SUPPORTED(vap
, va_guuid
);
2470 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2471 if (vap
->va_acl
== NULL
) {
2472 KAUTH_DEBUG("SETATTR - removing ACL");
2473 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2475 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap
->va_acl
->acl_entrycount
);
2478 VATTR_SET_SUPPORTED(vap
, va_acl
);
2482 * If the filesec data is all invalid, we can just remove
2483 * the EA completely.
2485 if ((facl
->acl_entrycount
== KAUTH_FILESEC_NOACL
) &&
2486 kauth_guid_equal(&fsec
->fsec_owner
, &kauth_null_guid
) &&
2487 kauth_guid_equal(&fsec
->fsec_group
, &kauth_null_guid
)) {
2488 error
= vn_removexattr(vp
, KAUTH_FILESEC_XATTR
, XATTR_NOSECURITY
, ctx
);
2489 /* no attribute is ok, nothing to delete */
2490 if (error
== ENOATTR
)
2492 VFS_DEBUG(ctx
, vp
, "SETATTR - remove filesec returning %d", error
);
2495 error
= vnode_set_filesec(vp
, fsec
, facl
, ctx
);
2496 VFS_DEBUG(ctx
, vp
, "SETATTR - update filesec returning %d", error
);
2499 /* if we fetched a filesec, dispose of the buffer */
2501 kauth_filesec_free(fsec
);
2509 * Definition of vnode operations.
2515 *#% lookup dvp L ? ?
2516 *#% lookup vpp - L -
2518 struct vnop_lookup_args
{
2519 struct vnodeop_desc
*a_desc
;
2522 struct componentname
*a_cnp
;
2523 vfs_context_t a_context
;
2528 * Returns: 0 Success
2529 * lock_fsnode:ENOENT No such file or directory [only for VFS
2530 * that is not thread safe & vnode is
2531 * currently being/has been terminated]
2532 * <vfs_lookup>:ENAMETOOLONG
2533 * <vfs_lookup>:ENOENT
2534 * <vfs_lookup>:EJUSTRETURN
2535 * <vfs_lookup>:EPERM
2536 * <vfs_lookup>:EISDIR
2537 * <vfs_lookup>:ENOTDIR
2540 * Note: The return codes from the underlying VFS's lookup routine can't
2541 * be fully enumerated here, since third party VFS authors may not
2542 * limit their error returns to the ones documented here, even
2543 * though this may result in some programs functioning incorrectly.
2545 * The return codes documented above are those which may currently
2546 * be returned by HFS from hfs_lookup, not including additional
2547 * error code which may be propagated from underlying routines.
2550 VNOP_LOOKUP(vnode_t dvp
, vnode_t
*vpp
, struct componentname
*cnp
, vfs_context_t ctx
)
2553 struct vnop_lookup_args a
;
2556 int funnel_state
= 0;
2558 a
.a_desc
= &vnop_lookup_desc
;
2563 thread_safe
= THREAD_SAFE_FS(dvp
);
2566 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2570 _err
= (*dvp
->v_op
[vnop_lookup_desc
.vdesc_offset
])(&a
);
2575 if ( (cnp
->cn_flags
& ISLASTCN
) ) {
2576 if ( (cnp
->cn_flags
& LOCKPARENT
) ) {
2577 if ( !(cnp
->cn_flags
& FSNODELOCKHELD
) ) {
2579 * leave the fsnode lock held on
2580 * the directory, but restore the funnel...
2581 * also indicate that we need to drop the
2582 * fsnode_lock when we're done with the
2583 * system call processing for this path
2585 cnp
->cn_flags
|= FSNODELOCKHELD
;
2587 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2592 unlock_fsnode(dvp
, &funnel_state
);
2600 *#% create dvp L L L
2601 *#% create vpp - L -
2605 struct vnop_create_args
{
2606 struct vnodeop_desc
*a_desc
;
2609 struct componentname
*a_cnp
;
2610 struct vnode_attr
*a_vap
;
2611 vfs_context_t a_context
;
2615 VNOP_CREATE(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2618 struct vnop_create_args a
;
2620 int funnel_state
= 0;
2622 a
.a_desc
= &vnop_create_desc
;
2628 thread_safe
= THREAD_SAFE_FS(dvp
);
2631 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2635 _err
= (*dvp
->v_op
[vnop_create_desc
.vdesc_offset
])(&a
);
2636 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
2638 * Remove stale Apple Double file (if any).
2640 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 0);
2643 unlock_fsnode(dvp
, &funnel_state
);
2651 *#% whiteout dvp L L L
2652 *#% whiteout cnp - - -
2653 *#% whiteout flag - - -
2656 struct vnop_whiteout_args
{
2657 struct vnodeop_desc
*a_desc
;
2659 struct componentname
*a_cnp
;
2661 vfs_context_t a_context
;
2665 VNOP_WHITEOUT(vnode_t dvp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
2668 struct vnop_whiteout_args a
;
2670 int funnel_state
= 0;
2672 a
.a_desc
= &vnop_whiteout_desc
;
2677 thread_safe
= THREAD_SAFE_FS(dvp
);
2680 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2684 _err
= (*dvp
->v_op
[vnop_whiteout_desc
.vdesc_offset
])(&a
);
2686 unlock_fsnode(dvp
, &funnel_state
);
2698 struct vnop_mknod_args
{
2699 struct vnodeop_desc
*a_desc
;
2702 struct componentname
*a_cnp
;
2703 struct vnode_attr
*a_vap
;
2704 vfs_context_t a_context
;
2708 VNOP_MKNOD(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2712 struct vnop_mknod_args a
;
2714 int funnel_state
= 0;
2716 a
.a_desc
= &vnop_mknod_desc
;
2722 thread_safe
= THREAD_SAFE_FS(dvp
);
2725 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2729 _err
= (*dvp
->v_op
[vnop_mknod_desc
.vdesc_offset
])(&a
);
2731 unlock_fsnode(dvp
, &funnel_state
);
2742 struct vnop_open_args
{
2743 struct vnodeop_desc
*a_desc
;
2746 vfs_context_t a_context
;
2750 VNOP_OPEN(vnode_t vp
, int mode
, vfs_context_t ctx
)
2753 struct vnop_open_args a
;
2755 int funnel_state
= 0;
2758 ctx
= vfs_context_current();
2760 a
.a_desc
= &vnop_open_desc
;
2764 thread_safe
= THREAD_SAFE_FS(vp
);
2767 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2768 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2769 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2770 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2775 _err
= (*vp
->v_op
[vnop_open_desc
.vdesc_offset
])(&a
);
2777 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2778 unlock_fsnode(vp
, NULL
);
2780 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2791 struct vnop_close_args
{
2792 struct vnodeop_desc
*a_desc
;
2795 vfs_context_t a_context
;
2799 VNOP_CLOSE(vnode_t vp
, int fflag
, vfs_context_t ctx
)
2802 struct vnop_close_args a
;
2804 int funnel_state
= 0;
2807 ctx
= vfs_context_current();
2809 a
.a_desc
= &vnop_close_desc
;
2813 thread_safe
= THREAD_SAFE_FS(vp
);
2816 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2817 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2818 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2819 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2824 _err
= (*vp
->v_op
[vnop_close_desc
.vdesc_offset
])(&a
);
2826 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2827 unlock_fsnode(vp
, NULL
);
2829 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2840 struct vnop_access_args
{
2841 struct vnodeop_desc
*a_desc
;
2844 vfs_context_t a_context
;
2848 VNOP_ACCESS(vnode_t vp
, int action
, vfs_context_t ctx
)
2851 struct vnop_access_args a
;
2853 int funnel_state
= 0;
2856 ctx
= vfs_context_current();
2858 a
.a_desc
= &vnop_access_desc
;
2860 a
.a_action
= action
;
2862 thread_safe
= THREAD_SAFE_FS(vp
);
2865 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2869 _err
= (*vp
->v_op
[vnop_access_desc
.vdesc_offset
])(&a
);
2871 unlock_fsnode(vp
, &funnel_state
);
2879 *#% getattr vp = = =
/*
 * VNOP_GETATTR: forward an attribute read (struct vnode_attr *vap) to the
 * filesystem through vp->v_op[vnop_getattr_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
2882 struct vnop_getattr_args
{
2883 struct vnodeop_desc
*a_desc
;
2885 struct vnode_attr
*a_vap
;
2886 vfs_context_t a_context
;
2890 VNOP_GETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2893 struct vnop_getattr_args a
;
2895 int funnel_state
= 0; /* protected by thread_safe */
2897 a
.a_desc
= &vnop_getattr_desc
;
2901 thread_safe
= THREAD_SAFE_FS(vp
);
2904 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2908 _err
= (*vp
->v_op
[vnop_getattr_desc
.vdesc_offset
])(&a
);
2910 unlock_fsnode(vp
, &funnel_state
);
2918 *#% setattr vp L L L
/*
 * VNOP_SETATTR: forward an attribute write to the filesystem through
 * vp->v_op[vnop_setattr_desc.vdesc_offset].  On success, for filesystems
 * without native extended attributes (!NATIVE_XATTR), any uid/gid/mode
 * change is shadowed to the "._" AppleDouble file via xattrfile_setattr(),
 * using the parent vnode and name from vnode_getparent()/vnode_getname().
 * Finally, if any authorization-relevant attribute (mode/uid/gid/flags/
 * acl/uuuid/guuid) was supported by the FS, the cached rights for vp are
 * invalidated with vnode_uncache_authorized_action().
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
2921 struct vnop_setattr_args
{
2922 struct vnodeop_desc
*a_desc
;
2924 struct vnode_attr
*a_vap
;
2925 vfs_context_t a_context
;
2929 VNOP_SETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2932 struct vnop_setattr_args a
;
2934 int funnel_state
= 0; /* protected by thread_safe */
2936 a
.a_desc
= &vnop_setattr_desc
;
2940 thread_safe
= THREAD_SAFE_FS(vp
);
2943 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2947 _err
= (*vp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
2950 * Shadow uid/gid/mod change to extended attribute file.
2952 if (_err
== 0 && !NATIVE_XATTR(vp
)) {
2953 struct vnode_attr va
;
2957 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2958 VATTR_SET(&va
, va_uid
, vap
->va_uid
);
2961 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2962 VATTR_SET(&va
, va_gid
, vap
->va_gid
);
2965 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
2966 VATTR_SET(&va
, va_mode
, vap
->va_mode
);
2973 dvp
= vnode_getparent(vp
);
2974 vname
= vnode_getname(vp
);
2976 xattrfile_setattr(dvp
, vname
, &va
, ctx
, thread_safe
);
2980 vnode_putname(vname
);
2984 unlock_fsnode(vp
, &funnel_state
);
2987 * If we have changed any of the things about the file that are likely
2988 * to result in changes to authorization results, blow the vnode auth
2992 VATTR_IS_SUPPORTED(vap
, va_mode
) ||
2993 VATTR_IS_SUPPORTED(vap
, va_uid
) ||
2994 VATTR_IS_SUPPORTED(vap
, va_gid
) ||
2995 VATTR_IS_SUPPORTED(vap
, va_flags
) ||
2996 VATTR_IS_SUPPORTED(vap
, va_acl
) ||
2997 VATTR_IS_SUPPORTED(vap
, va_uuuid
) ||
2998 VATTR_IS_SUPPORTED(vap
, va_guuid
)))
2999 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
/*
 * VNOP_READ: forward a read (uio + ioflag) to the filesystem through
 * vp->v_op[vnop_read_desc.vdesc_offset].  Non-thread-safe filesystems take
 * the kernel funnel; vnode types other than VCHR/VFIFO/VSOCK also take the
 * fsnode lock around the dispatch.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3011 struct vnop_read_args
{
3012 struct vnodeop_desc
*a_desc
;
3016 vfs_context_t a_context
;
3020 VNOP_READ(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3023 struct vnop_read_args a
;
3025 int funnel_state
= 0;
3028 ctx
= vfs_context_current();
3031 a
.a_desc
= &vnop_read_desc
;
3034 a
.a_ioflag
= ioflag
;
3036 thread_safe
= THREAD_SAFE_FS(vp
);
3039 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3040 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3041 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3042 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3047 _err
= (*vp
->v_op
[vnop_read_desc
.vdesc_offset
])(&a
);
3050 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3051 unlock_fsnode(vp
, NULL
);
3053 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * VNOP_WRITE: forward a write (uio + ioflag) to the filesystem through
 * vp->v_op[vnop_write_desc.vdesc_offset], with the same funnel/fsnode
 * locking pattern as VNOP_READ for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3065 struct vnop_write_args
{
3066 struct vnodeop_desc
*a_desc
;
3070 vfs_context_t a_context
;
3074 VNOP_WRITE(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3076 struct vnop_write_args a
;
3079 int funnel_state
= 0;
3082 ctx
= vfs_context_current();
3085 a
.a_desc
= &vnop_write_desc
;
3088 a
.a_ioflag
= ioflag
;
3090 thread_safe
= THREAD_SAFE_FS(vp
);
3093 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3094 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3095 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3096 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3101 _err
= (*vp
->v_op
[vnop_write_desc
.vdesc_offset
])(&a
);
3104 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3105 unlock_fsnode(vp
, NULL
);
3107 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * VNOP_IOCTL: forward an ioctl to the filesystem through
 * vp->v_op[vnop_ioctl_desc.vdesc_offset].  For a 64-bit caller
 * (vfs_context_is64bit) the target vnode's FS must be 64-bit ready
 * (vnode_vfs64bitready) — presumably rejected otherwise; the rejection path
 * is among the lines lost in extraction (TODO confirm against original).
 * Same funnel/fsnode locking pattern as VNOP_READ/VNOP_WRITE.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3119 struct vnop_ioctl_args
{
3120 struct vnodeop_desc
*a_desc
;
3125 vfs_context_t a_context
;
3129 VNOP_IOCTL(vnode_t vp
, u_long command
, caddr_t data
, int fflag
, vfs_context_t ctx
)
3132 struct vnop_ioctl_args a
;
3134 int funnel_state
= 0;
3137 ctx
= vfs_context_current();
3140 if (vfs_context_is64bit(ctx
)) {
3141 if (!vnode_vfs64bitready(vp
)) {
3146 a
.a_desc
= &vnop_ioctl_desc
;
3148 a
.a_command
= command
;
3152 thread_safe
= THREAD_SAFE_FS(vp
);
3155 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3156 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3157 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3158 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3163 _err
= (*vp
->v_op
[vnop_ioctl_desc
.vdesc_offset
])(&a
);
3165 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3166 unlock_fsnode(vp
, NULL
);
3168 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * VNOP_SELECT: forward a select/poll query to the filesystem through
 * vp->v_op[vnop_select_desc.vdesc_offset], with the same funnel/fsnode
 * locking pattern as VNOP_READ for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3180 struct vnop_select_args
{
3181 struct vnodeop_desc
*a_desc
;
3186 vfs_context_t a_context
;
3190 VNOP_SELECT(vnode_t vp
, int which
, int fflags
, void * wql
, vfs_context_t ctx
)
3193 struct vnop_select_args a
;
3195 int funnel_state
= 0;
3198 ctx
= vfs_context_current();
3200 a
.a_desc
= &vnop_select_desc
;
3203 a
.a_fflags
= fflags
;
3206 thread_safe
= THREAD_SAFE_FS(vp
);
3209 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3210 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3211 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3212 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3217 _err
= (*vp
->v_op
[vnop_select_desc
.vdesc_offset
])(&a
);
3219 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3220 unlock_fsnode(vp
, NULL
);
3222 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3231 *#% exchange fvp L L L
3232 *#% exchange tvp L L L
/*
 * VNOP_EXCHANGE: forward an exchange-files operation (fvp <-> tvp) to the
 * filesystem through fvp->v_op[vnop_exchange_desc.vdesc_offset].  For a
 * non-thread-safe FS, the two fsnodes are locked in vnode address order
 * (lock_first/lock_second) to avoid deadlocks, and unlocked in reverse
 * order after the dispatch.
 * NOTE(review): lossy extraction — the lock-ordering selection lines are
 * missing; code text left byte-identical.
 */
3235 struct vnop_exchange_args
{
3236 struct vnodeop_desc
*a_desc
;
3240 vfs_context_t a_context
;
3244 VNOP_EXCHANGE(vnode_t fvp
, vnode_t tvp
, int options
, vfs_context_t ctx
)
3247 struct vnop_exchange_args a
;
3249 int funnel_state
= 0;
3250 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3252 a
.a_desc
= &vnop_exchange_desc
;
3255 a
.a_options
= options
;
3257 thread_safe
= THREAD_SAFE_FS(fvp
);
3261 * Lock in vnode address order to avoid deadlocks
3270 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) ) {
3273 if ( (_err
= lock_fsnode(lock_second
, NULL
)) ) {
3274 unlock_fsnode(lock_first
, &funnel_state
);
3278 _err
= (*fvp
->v_op
[vnop_exchange_desc
.vdesc_offset
])(&a
);
3280 unlock_fsnode(lock_second
, NULL
);
3281 unlock_fsnode(lock_first
, &funnel_state
);
/*
 * VNOP_REVOKE: forward a revoke operation to the filesystem through
 * vp->v_op[vnop_revoke_desc.vdesc_offset].  For a non-thread-safe FS only
 * the kernel funnel is taken (no fsnode lock).
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3293 struct vnop_revoke_args
{
3294 struct vnodeop_desc
*a_desc
;
3297 vfs_context_t a_context
;
3301 VNOP_REVOKE(vnode_t vp
, int flags
, vfs_context_t ctx
)
3303 struct vnop_revoke_args a
;
3306 int funnel_state
= 0;
3308 a
.a_desc
= &vnop_revoke_desc
;
3312 thread_safe
= THREAD_SAFE_FS(vp
);
3315 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3317 _err
= (*vp
->v_op
[vnop_revoke_desc
.vdesc_offset
])(&a
);
3319 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * VNOP_MMAP: forward an mmap notification (fflags) to the filesystem
 * through vp->v_op[vnop_mmap_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3331 struct vnop_mmap_args
{
3332 struct vnodeop_desc
*a_desc
;
3335 vfs_context_t a_context
;
3339 VNOP_MMAP(vnode_t vp
, int fflags
, vfs_context_t ctx
)
3342 struct vnop_mmap_args a
;
3344 int funnel_state
= 0;
3346 a
.a_desc
= &vnop_mmap_desc
;
3348 a
.a_fflags
= fflags
;
3350 thread_safe
= THREAD_SAFE_FS(vp
);
3353 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3357 _err
= (*vp
->v_op
[vnop_mmap_desc
.vdesc_offset
])(&a
);
3359 unlock_fsnode(vp
, &funnel_state
);
3368 *# mnomap - vp U U U
/*
 * VNOP_MNOMAP: forward a "no longer mapped" notification to the filesystem
 * through vp->v_op[vnop_mnomap_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3371 struct vnop_mnomap_args
{
3372 struct vnodeop_desc
*a_desc
;
3374 vfs_context_t a_context
;
3378 VNOP_MNOMAP(vnode_t vp
, vfs_context_t ctx
)
3381 struct vnop_mnomap_args a
;
3383 int funnel_state
= 0;
3385 a
.a_desc
= &vnop_mnomap_desc
;
3388 thread_safe
= THREAD_SAFE_FS(vp
);
3391 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3395 _err
= (*vp
->v_op
[vnop_mnomap_desc
.vdesc_offset
])(&a
);
3397 unlock_fsnode(vp
, &funnel_state
);
/*
 * VNOP_FSYNC: forward an fsync (waitfor mode) to the filesystem through
 * vp->v_op[vnop_fsync_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3409 struct vnop_fsync_args
{
3410 struct vnodeop_desc
*a_desc
;
3413 vfs_context_t a_context
;
3417 VNOP_FSYNC(vnode_t vp
, int waitfor
, vfs_context_t ctx
)
3419 struct vnop_fsync_args a
;
3422 int funnel_state
= 0;
3424 a
.a_desc
= &vnop_fsync_desc
;
3426 a
.a_waitfor
= waitfor
;
3428 thread_safe
= THREAD_SAFE_FS(vp
);
3431 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3435 _err
= (*vp
->v_op
[vnop_fsync_desc
.vdesc_offset
])(&a
);
3437 unlock_fsnode(vp
, &funnel_state
);
3446 *#% remove dvp L U U
/*
 * VNOP_REMOVE: forward an unlink of vp (named by cnp, in directory dvp) to
 * the filesystem through dvp->v_op[vnop_remove_desc.vdesc_offset].  On
 * success the vnode is marked for inactive processing
 * (vnode_setneedinactive), and for non-native-xattr filesystems the
 * companion "._" AppleDouble file is removed via xattrfile_remove(..., 1).
 * NOTE(review): lossy extraction — some original lines (including the
 * success check around setneedinactive) are missing; code text left
 * byte-identical.
 */
3450 struct vnop_remove_args
{
3451 struct vnodeop_desc
*a_desc
;
3454 struct componentname
*a_cnp
;
3456 vfs_context_t a_context
;
3460 VNOP_REMOVE(vnode_t dvp
, vnode_t vp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
3463 struct vnop_remove_args a
;
3465 int funnel_state
= 0;
3467 a
.a_desc
= &vnop_remove_desc
;
3473 thread_safe
= THREAD_SAFE_FS(dvp
);
3476 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3480 _err
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3483 vnode_setneedinactive(vp
);
3485 if ( !(NATIVE_XATTR(dvp
)) ) {
3487 * Remove any associated extended attribute file (._ AppleDouble file).
3489 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 1);
3493 unlock_fsnode(vp
, &funnel_state
);
/*
 * VNOP_LINK: forward a hard-link creation (vp linked into tdvp as cnp) to
 * the filesystem through tdvp->v_op[vnop_link_desc.vdesc_offset].  For
 * filesystems without native extended attributes, linking to an existing
 * "._" AppleDouble regular file is disallowed (checked by inspecting the
 * vnode's name via vnode_getname/vnode_putname).
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3506 struct vnop_link_args
{
3507 struct vnodeop_desc
*a_desc
;
3510 struct componentname
*a_cnp
;
3511 vfs_context_t a_context
;
3515 VNOP_LINK(vnode_t vp
, vnode_t tdvp
, struct componentname
* cnp
, vfs_context_t ctx
)
3518 struct vnop_link_args a
;
3520 int funnel_state
= 0;
3523 * For file systems with non-native extended attributes,
3524 * disallow linking to an existing "._" Apple Double file.
3526 if ( !NATIVE_XATTR(tdvp
) && (vp
->v_type
== VREG
)) {
3529 vname
= vnode_getname(vp
);
3530 if (vname
!= NULL
) {
3532 if (vname
[0] == '.' && vname
[1] == '_' && vname
[2] != '\0') {
3535 vnode_putname(vname
);
3540 a
.a_desc
= &vnop_link_desc
;
3545 thread_safe
= THREAD_SAFE_FS(vp
);
3548 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3552 _err
= (*tdvp
->v_op
[vnop_link_desc
.vdesc_offset
])(&a
);
3554 unlock_fsnode(vp
, &funnel_state
);
3563 *#% rename fdvp U U U
3564 *#% rename fvp U U U
3565 *#% rename tdvp L U U
3566 *#% rename tvp X U U
/*
 * VNOP_RENAME: forward a rename (fdvp/fvp/fcnp -> tdvp/tvp/tcnp) to the
 * filesystem through fdvp->v_op[vnop_rename_desc.vdesc_offset].
 *
 * Locking: for non-thread-safe filesystems (fdvp_unsafe/tdvp_unsafe),
 * the parent fsnodes and then the child fsnodes are each locked in vnode
 * address order (lock_first/lock_second) to avoid deadlocks, and unlocked
 * after the dispatch.
 *
 * AppleDouble handling: for non-native-xattr filesystems, the companion
 * "._<name>" source/destination names are built into smallname1/smallname2
 * (heap-allocated via MALLOC when longer than 48 bytes) and, after a
 * successful rename, the "._" file itself is renamed: the source "._" vnode
 * is looked up with namei(DELETE), a pre-existing destination "._" file is
 * removed through a direct vnop_remove dispatch, then a second
 * vnop_rename dispatch moves the "._" file; finally the renamed vnode's
 * cached name/parent identity is fixed up with vnode_update_identity().
 *
 * NOTE(review): lossy extraction — numerous original lines (declarations,
 * returns, braces, some lock-selection assignments) are missing; code text
 * left byte-identical.
 */
3569 struct vnop_rename_args
{
3570 struct vnodeop_desc
*a_desc
;
3573 struct componentname
*a_fcnp
;
3576 struct componentname
*a_tcnp
;
3577 vfs_context_t a_context
;
3581 VNOP_RENAME(struct vnode
*fdvp
, struct vnode
*fvp
, struct componentname
*fcnp
,
3582 struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
3586 struct vnop_rename_args a
;
3587 int funnel_state
= 0;
3588 char smallname1
[48];
3589 char smallname2
[48];
3590 char *xfromname
= NULL
;
3591 char *xtoname
= NULL
;
3592 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3593 vnode_t fdvp_unsafe
= NULLVP
;
3594 vnode_t tdvp_unsafe
= NULLVP
;
3596 a
.a_desc
= &vnop_rename_desc
;
3605 if (!THREAD_SAFE_FS(fdvp
))
3607 if (!THREAD_SAFE_FS(tdvp
))
3610 if (fdvp_unsafe
!= NULLVP
) {
3612 * Lock parents in vnode address order to avoid deadlocks
3613 * note that it's possible for the fdvp to be unsafe,
3614 * but the tdvp to be safe because tvp could be a directory
3615 * in the root of a filesystem... in that case, tdvp is the
3616 * in the filesystem that this root is mounted on
3618 if (tdvp_unsafe
== NULL
|| fdvp_unsafe
== tdvp_unsafe
) {
3619 lock_first
= fdvp_unsafe
;
3621 } else if (fdvp_unsafe
< tdvp_unsafe
) {
3622 lock_first
= fdvp_unsafe
;
3623 lock_second
= tdvp_unsafe
;
3625 lock_first
= tdvp_unsafe
;
3626 lock_second
= fdvp_unsafe
;
3628 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) )
3631 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3632 unlock_fsnode(lock_first
, &funnel_state
);
3637 * Lock both children in vnode address order to avoid deadlocks
3639 if (tvp
== NULL
|| tvp
== fvp
) {
3642 } else if (fvp
< tvp
) {
3649 if ( (_err
= lock_fsnode(lock_first
, NULL
)) )
3652 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3653 unlock_fsnode(lock_first
, NULL
);
3658 * Save source and destination names (._ AppleDouble files).
3659 * Skip if source already has a "._" prefix.
3661 if (!NATIVE_XATTR(fdvp
) &&
3662 !(fcnp
->cn_nameptr
[0] == '.' && fcnp
->cn_nameptr
[1] == '_')) {
3665 /* Get source attribute file name. */
3666 len
= fcnp
->cn_namelen
+ 3;
3667 if (len
> sizeof(smallname1
)) {
3668 MALLOC(xfromname
, char *, len
, M_TEMP
, M_WAITOK
);
3670 xfromname
= &smallname1
[0];
3672 strlcpy(xfromname
, "._", min(sizeof smallname1
, len
));
3673 strncat(xfromname
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
);
3674 xfromname
[len
-1] = '\0';
3676 /* Get destination attribute file name. */
3677 len
= tcnp
->cn_namelen
+ 3;
3678 if (len
> sizeof(smallname2
)) {
3679 MALLOC(xtoname
, char *, len
, M_TEMP
, M_WAITOK
);
3681 xtoname
= &smallname2
[0];
3683 strlcpy(xtoname
, "._", min(sizeof smallname2
, len
));
3684 strncat(xtoname
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
);
3685 xtoname
[len
-1] = '\0';
3688 _err
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3690 if (fdvp_unsafe
!= NULLVP
) {
3691 if (lock_second
!= NULL
)
3692 unlock_fsnode(lock_second
, NULL
);
3693 unlock_fsnode(lock_first
, NULL
);
3696 if (tvp
&& tvp
!= fvp
)
3697 vnode_setneedinactive(tvp
);
3701 * Rename any associated extended attribute file (._ AppleDouble file).
3703 if (_err
== 0 && !NATIVE_XATTR(fdvp
) && xfromname
!= NULL
) {
3704 struct nameidata fromnd
, tond
;
3709 * Get source attribute file vnode.
3710 * Note that fdvp already has an iocount reference and
3711 * using DELETE will take an additional reference.
3713 NDINIT(&fromnd
, DELETE
, NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
3714 CAST_USER_ADDR_T(xfromname
), ctx
);
3715 fromnd
.ni_dvp
= fdvp
;
3716 error
= namei(&fromnd
);
3719 /* When source doesn't exist there still may be a destination. */
3720 if (error
== ENOENT
) {
3725 } else if (fromnd
.ni_vp
->v_type
!= VREG
) {
3726 vnode_put(fromnd
.ni_vp
);
3731 struct vnop_remove_args args
;
3734 * Get destination attribute file vnode.
3735 * Note that tdvp already has an iocount reference.
3737 NDINIT(&tond
, DELETE
, NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
3738 CAST_USER_ADDR_T(xtoname
), ctx
);
3740 error
= namei(&tond
);
3744 if (tond
.ni_vp
->v_type
!= VREG
) {
3745 vnode_put(tond
.ni_vp
);
3749 args
.a_desc
= &vnop_remove_desc
;
3751 args
.a_vp
= tond
.ni_vp
;
3752 args
.a_cnp
= &tond
.ni_cnd
;
3753 args
.a_context
= ctx
;
3755 if (fdvp_unsafe
!= NULLVP
)
3756 error
= lock_fsnode(tond
.ni_vp
, NULL
);
3758 error
= (*tdvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&args
);
3760 if (fdvp_unsafe
!= NULLVP
)
3761 unlock_fsnode(tond
.ni_vp
, NULL
);
3764 vnode_setneedinactive(tond
.ni_vp
);
3766 vnode_put(tond
.ni_vp
);
3772 * Get destination attribute file vnode.
3774 NDINIT(&tond
, RENAME
,
3775 NOCACHE
| NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
3776 CAST_USER_ADDR_T(xtoname
), ctx
);
3778 error
= namei(&tond
);
3781 vnode_put(fromnd
.ni_vp
);
3785 a
.a_desc
= &vnop_rename_desc
;
3787 a
.a_fvp
= fromnd
.ni_vp
;
3788 a
.a_fcnp
= &fromnd
.ni_cnd
;
3790 a
.a_tvp
= tond
.ni_vp
;
3791 a
.a_tcnp
= &tond
.ni_cnd
;
3794 if (fdvp_unsafe
!= NULLVP
) {
3796 * Lock in vnode address order to avoid deadlocks
3798 if (tond
.ni_vp
== NULL
|| tond
.ni_vp
== fromnd
.ni_vp
) {
3799 lock_first
= fromnd
.ni_vp
;
3801 } else if (fromnd
.ni_vp
< tond
.ni_vp
) {
3802 lock_first
= fromnd
.ni_vp
;
3803 lock_second
= tond
.ni_vp
;
3805 lock_first
= tond
.ni_vp
;
3806 lock_second
= fromnd
.ni_vp
;
3808 if ( (error
= lock_fsnode(lock_first
, NULL
)) == 0) {
3809 if (lock_second
!= NULL
&& (error
= lock_fsnode(lock_second
, NULL
)) )
3810 unlock_fsnode(lock_first
, NULL
);
3817 /* Save these off so we can later verify them (fix up below) */
3818 oname
= fromnd
.ni_vp
->v_name
;
3819 oparent
= fromnd
.ni_vp
->v_parent
;
3821 error
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3823 if (fdvp_unsafe
!= NULLVP
) {
3824 if (lock_second
!= NULL
)
3825 unlock_fsnode(lock_second
, NULL
);
3826 unlock_fsnode(lock_first
, NULL
);
3829 vnode_setneedinactive(fromnd
.ni_vp
);
3831 if (tond
.ni_vp
&& tond
.ni_vp
!= fromnd
.ni_vp
)
3832 vnode_setneedinactive(tond
.ni_vp
);
3834 * Fix up name & parent pointers on ._ file
3836 if (oname
== fromnd
.ni_vp
->v_name
&&
3837 oparent
== fromnd
.ni_vp
->v_parent
) {
3840 update_flags
= VNODE_UPDATE_NAME
;
3843 update_flags
|= VNODE_UPDATE_PARENT
;
3845 vnode_update_identity(fromnd
.ni_vp
, tdvp
,
3846 tond
.ni_cnd
.cn_nameptr
,
3847 tond
.ni_cnd
.cn_namelen
,
3848 tond
.ni_cnd
.cn_hash
,
3853 vnode_put(fromnd
.ni_vp
);
3855 vnode_put(tond
.ni_vp
);
3861 if (xfromname
&& xfromname
!= &smallname1
[0]) {
3862 FREE(xfromname
, M_TEMP
);
3864 if (xtoname
&& xtoname
!= &smallname2
[0]) {
3865 FREE(xtoname
, M_TEMP
);
3868 if (fdvp_unsafe
!= NULLVP
) {
3869 if (tdvp_unsafe
!= NULLVP
)
3870 unlock_fsnode(tdvp_unsafe
, NULL
);
3871 unlock_fsnode(fdvp_unsafe
, &funnel_state
);
/*
 * VNOP_MKDIR: forward a directory creation (cnp/vap under dvp) to the
 * filesystem through dvp->v_op[vnop_mkdir_desc.vdesc_offset].  On success,
 * for non-native-xattr filesystems, any stale "._" AppleDouble file with
 * the same name is removed via xattrfile_remove(..., 0) (non-forced;
 * staleness checked inside the helper).
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3883 struct vnop_mkdir_args
{
3884 struct vnodeop_desc
*a_desc
;
3887 struct componentname
*a_cnp
;
3888 struct vnode_attr
*a_vap
;
3889 vfs_context_t a_context
;
3893 VNOP_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3894 struct vnode_attr
*vap
, vfs_context_t ctx
)
3897 struct vnop_mkdir_args a
;
3899 int funnel_state
= 0;
3901 a
.a_desc
= &vnop_mkdir_desc
;
3907 thread_safe
= THREAD_SAFE_FS(dvp
);
3910 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3914 _err
= (*dvp
->v_op
[vnop_mkdir_desc
.vdesc_offset
])(&a
);
3915 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3917 * Remove stale Apple Double file (if any).
3919 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 0);
3922 unlock_fsnode(dvp
, &funnel_state
);
/*
 * VNOP_RMDIR: forward a directory removal (vp named cnp under dvp) to the
 * filesystem through vp->v_op[vnop_rmdir_desc.vdesc_offset].  On success
 * the vnode is marked for inactive processing, and for non-native-xattr
 * filesystems the companion "._" AppleDouble file is force-removed via
 * xattrfile_remove(..., 1).
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
3935 struct vnop_rmdir_args
{
3936 struct vnodeop_desc
*a_desc
;
3939 struct componentname
*a_cnp
;
3940 vfs_context_t a_context
;
3945 VNOP_RMDIR(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
, vfs_context_t ctx
)
3948 struct vnop_rmdir_args a
;
3950 int funnel_state
= 0;
3952 a
.a_desc
= &vnop_rmdir_desc
;
3957 thread_safe
= THREAD_SAFE_FS(dvp
);
3960 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3964 _err
= (*vp
->v_op
[vnop_rmdir_desc
.vdesc_offset
])(&a
);
3967 vnode_setneedinactive(vp
);
3969 if ( !(NATIVE_XATTR(dvp
)) ) {
3971 * Remove any associated extended attribute file (._ AppleDouble file).
3973 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 1);
3977 unlock_fsnode(vp
, &funnel_state
);
3983 * Remove a ._ AppleDouble file
/*
 * xattrfile_remove: remove the "._<basename>" AppleDouble companion file
 * in directory dvp.  Skips NULL/empty basenames and names already prefixed
 * "._".  The "._" name is formatted into a 62-byte stack buffer
 * (heap-allocated via MALLOC when longer) and looked up with
 * namei(DELETE, WANTPARENT|LOCKLEAF|NOFOLLOW|USEDVP).  When 'force' is 0
 * (create paths), a non-empty "._" file is only removed if its modify time
 * is more than AD_STALE_SECS (180 s) in the past — i.e. presumed stale.
 * Removal goes directly through dvp->v_op[vnop_remove_desc.vdesc_offset],
 * taking the fsnode lock on the "._" vnode for non-thread-safe filesystems,
 * and marks the vnode for inactive processing on success.
 * NOTE(review): lossy extraction — declarations (xvp, smallname, tv, error)
 * and several braces/returns are missing; code text left byte-identical.
 */
3985 #define AD_STALE_SECS (180)
3987 xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t ctx
, int thread_safe
, int force
) {
3989 struct nameidata nd
;
3991 char *filename
= NULL
;
3994 if ((basename
== NULL
) || (basename
[0] == '\0') ||
3995 (basename
[0] == '.' && basename
[1] == '_')) {
3998 filename
= &smallname
[0];
3999 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
4000 if (len
>= sizeof(smallname
)) {
4001 len
++; /* snprintf result doesn't include '\0' */
4002 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
4003 len
= snprintf(filename
, len
, "._%s", basename
);
4005 NDINIT(&nd
, DELETE
, WANTPARENT
| LOCKLEAF
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
4006 CAST_USER_ADDR_T(filename
), ctx
);
4008 if (namei(&nd
) != 0)
4013 if (xvp
->v_type
!= VREG
)
4017 * When creating a new object and a "._" file already
4018 * exists, check to see if its a stale "._" file.
4022 struct vnode_attr va
;
4025 VATTR_WANTED(&va
, va_data_size
);
4026 VATTR_WANTED(&va
, va_modify_time
);
4027 if (VNOP_GETATTR(xvp
, &va
, ctx
) == 0 &&
4028 VATTR_IS_SUPPORTED(&va
, va_data_size
) &&
4029 VATTR_IS_SUPPORTED(&va
, va_modify_time
) &&
4030 va
.va_data_size
!= 0) {
4034 if ((tv
.tv_sec
> va
.va_modify_time
.tv_sec
) &&
4035 (tv
.tv_sec
- va
.va_modify_time
.tv_sec
) > AD_STALE_SECS
) {
4036 force
= 1; /* must be stale */
4041 struct vnop_remove_args a
;
4044 a
.a_desc
= &vnop_remove_desc
;
4045 a
.a_dvp
= nd
.ni_dvp
;
4047 a
.a_cnp
= &nd
.ni_cnd
;
4051 if ( (lock_fsnode(xvp
, NULL
)) )
4054 error
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
4057 unlock_fsnode(xvp
, NULL
);
4060 vnode_setneedinactive(xvp
);
4066 if (filename
&& filename
!= &smallname
[0]) {
4067 FREE(filename
, M_TEMP
);
4072 * Shadow uid/gid/mod to a ._ AppleDouble file
/*
 * xattrfile_setattr: mirror a uid/gid/mode change onto the "._<basename>"
 * AppleDouble companion file in directory dvp.  Skips NULL dvp, NULL/empty
 * basenames and names already prefixed "._".  The "._" name is formatted
 * into a small stack buffer (heap-allocated via MALLOC when longer), the
 * file is looked up with namei(LOOKUP, NOFOLLOW|USEDVP), and if it is a
 * regular file the vnop_setattr is dispatched directly through
 * xvp->v_op[vnop_setattr_desc.vdesc_offset], with the fsnode lock taken on
 * the "._" vnode for non-thread-safe filesystems.  Errors are deliberately
 * ignored (return value of the dispatch is cast to void) — best effort.
 * NOTE(review): lossy extraction — declarations (xvp, smallname, len) and
 * cleanup lines are missing; code text left byte-identical.
 */
4075 xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
4076 vfs_context_t ctx
, int thread_safe
) {
4078 struct nameidata nd
;
4080 char *filename
= NULL
;
4083 if ((dvp
== NULLVP
) ||
4084 (basename
== NULL
) || (basename
[0] == '\0') ||
4085 (basename
[0] == '.' && basename
[1] == '_')) {
4088 filename
= &smallname
[0];
4089 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
4090 if (len
>= sizeof(smallname
)) {
4091 len
++; /* snprintf result doesn't include '\0' */
4092 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
4093 len
= snprintf(filename
, len
, "._%s", basename
);
4095 NDINIT(&nd
, LOOKUP
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
4096 CAST_USER_ADDR_T(filename
), ctx
);
4098 if (namei(&nd
) != 0)
4104 if (xvp
->v_type
== VREG
) {
4105 struct vnop_setattr_args a
;
4107 a
.a_desc
= &vnop_setattr_desc
;
4113 if ( (lock_fsnode(xvp
, NULL
)) )
4116 (void) (*xvp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
4118 unlock_fsnode(xvp
, NULL
);
4124 if (filename
&& filename
!= &smallname
[0]) {
4125 FREE(filename
, M_TEMP
);
4132 *#% symlink dvp L U U
4133 *#% symlink vpp - U -
/*
 * VNOP_SYMLINK: forward a symlink creation (cnp/vap/target under dvp) to
 * the filesystem through dvp->v_op[vnop_symlink_desc.vdesc_offset].  On
 * success, for non-native-xattr filesystems, any stale "._" AppleDouble
 * file with the same name is removed via xattrfile_remove(..., 0).
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4136 struct vnop_symlink_args
{
4137 struct vnodeop_desc
*a_desc
;
4140 struct componentname
*a_cnp
;
4141 struct vnode_attr
*a_vap
;
4143 vfs_context_t a_context
;
4148 VNOP_SYMLINK(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
4149 struct vnode_attr
*vap
, char *target
, vfs_context_t ctx
)
4152 struct vnop_symlink_args a
;
4154 int funnel_state
= 0;
4156 a
.a_desc
= &vnop_symlink_desc
;
4161 a
.a_target
= target
;
4163 thread_safe
= THREAD_SAFE_FS(dvp
);
4166 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
4170 _err
= (*dvp
->v_op
[vnop_symlink_desc
.vdesc_offset
])(&a
);
4171 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
4173 * Remove stale Apple Double file (if any).
4175 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 0);
4178 unlock_fsnode(dvp
, &funnel_state
);
4186 *#% readdir vp L L L
/*
 * VNOP_READDIR: forward a directory read (uio, flags, eofflag, numdirent)
 * to the filesystem through vp->v_op[vnop_readdir_desc.vdesc_offset],
 * bracketed by lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4189 struct vnop_readdir_args
{
4190 struct vnodeop_desc
*a_desc
;
4196 vfs_context_t a_context
;
4201 VNOP_READDIR(struct vnode
*vp
, struct uio
*uio
, int flags
, int *eofflag
,
4202 int *numdirent
, vfs_context_t ctx
)
4205 struct vnop_readdir_args a
;
4207 int funnel_state
= 0;
4209 a
.a_desc
= &vnop_readdir_desc
;
4213 a
.a_eofflag
= eofflag
;
4214 a
.a_numdirent
= numdirent
;
4216 thread_safe
= THREAD_SAFE_FS(vp
);
4219 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4223 _err
= (*vp
->v_op
[vnop_readdir_desc
.vdesc_offset
])(&a
);
4225 unlock_fsnode(vp
, &funnel_state
);
4233 *#% readdirattr vp L L L
/*
 * VNOP_READDIRATTR: forward a bulk directory-attribute read (attrlist,
 * maxcount, options, newstate, eofflag, actualcount) to the filesystem
 * through vp->v_op[vnop_readdirattr_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4236 struct vnop_readdirattr_args
{
4237 struct vnodeop_desc
*a_desc
;
4239 struct attrlist
*a_alist
;
4245 u_long
*a_actualcount
;
4246 vfs_context_t a_context
;
4251 VNOP_READDIRATTR(struct vnode
*vp
, struct attrlist
*alist
, struct uio
*uio
, u_long maxcount
,
4252 u_long options
, u_long
*newstate
, int *eofflag
, u_long
*actualcount
, vfs_context_t ctx
)
4255 struct vnop_readdirattr_args a
;
4257 int funnel_state
= 0;
4259 a
.a_desc
= &vnop_readdirattr_desc
;
4263 a
.a_maxcount
= maxcount
;
4264 a
.a_options
= options
;
4265 a
.a_newstate
= newstate
;
4266 a
.a_eofflag
= eofflag
;
4267 a
.a_actualcount
= actualcount
;
4269 thread_safe
= THREAD_SAFE_FS(vp
);
4272 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4276 _err
= (*vp
->v_op
[vnop_readdirattr_desc
.vdesc_offset
])(&a
);
4278 unlock_fsnode(vp
, &funnel_state
);
4286 *#% readlink vp L L L
/*
 * VNOP_READLINK: forward a symlink-content read (uio) to the filesystem
 * through vp->v_op[vnop_readlink_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.  Error
 * enumeration below is from the original source comment.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4289 struct vnop_readlink_args
{
4290 struct vnodeop_desc
*a_desc
;
4293 vfs_context_t a_context
;
4298 * Returns: 0 Success
4299 * lock_fsnode:ENOENT No such file or directory [only for VFS
4300 * that is not thread safe & vnode is
4301 * currently being/has been terminated]
4302 * <vfs_readlink>:EINVAL
4303 * <vfs_readlink>:???
4305 * Note: The return codes from the underlying VFS's readlink routine
4306 * can't be fully enumerated here, since third party VFS authors
4307 * may not limit their error returns to the ones documented here,
4308 * even though this may result in some programs functioning
4311 * The return codes documented above are those which may currently
4312 * be returned by HFS from hfs_vnop_readlink, not including
4313 * additional error code which may be propagated from underlying
4317 VNOP_READLINK(struct vnode
*vp
, struct uio
*uio
, vfs_context_t ctx
)
4320 struct vnop_readlink_args a
;
4322 int funnel_state
= 0;
4324 a
.a_desc
= &vnop_readlink_desc
;
4328 thread_safe
= THREAD_SAFE_FS(vp
);
4331 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4335 _err
= (*vp
->v_op
[vnop_readlink_desc
.vdesc_offset
])(&a
);
4337 unlock_fsnode(vp
, &funnel_state
);
4345 *#% inactive vp L U U
/*
 * VNOP_INACTIVE: forward the inactive notification to the filesystem
 * through vp->v_op[vnop_inactive_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.  Afterwards,
 * a shadow named-stream vnode (vnode_isnamedstream + vnode_isshadow, with a
 * live parent and not already terminating via VL_TERMINATE) is flagged for
 * prompt recycling — the action body is among the lines lost in extraction
 * (TODO confirm against original).
 * NOTE(review): lossy extraction — code text left byte-identical.
 */
4348 struct vnop_inactive_args
{
4349 struct vnodeop_desc
*a_desc
;
4351 vfs_context_t a_context
;
4355 VNOP_INACTIVE(struct vnode
*vp
, vfs_context_t ctx
)
4358 struct vnop_inactive_args a
;
4360 int funnel_state
= 0;
4362 a
.a_desc
= &vnop_inactive_desc
;
4365 thread_safe
= THREAD_SAFE_FS(vp
);
4368 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4372 _err
= (*vp
->v_op
[vnop_inactive_desc
.vdesc_offset
])(&a
);
4374 unlock_fsnode(vp
, &funnel_state
);
4378 /* For file systems that do not support namedstreams natively, mark
4379 * the shadow stream file vnode to be recycled as soon as the last
4380 * reference goes away. To avoid re-entering reclaim code, do not
4381 * call recycle on terminating named stream vnodes.
4383 if (vnode_isnamedstream(vp
) &&
4384 (vp
->v_parent
!= NULLVP
) &&
4385 (vnode_isshadow(vp
)) &&
4386 ((vp
->v_lflag
& VL_TERMINATE
) == 0)) {
4398 *#% reclaim vp U U U
/*
 * VNOP_RECLAIM: forward the reclaim (final teardown) of a vnode to the
 * filesystem through vp->v_op[vnop_reclaim_desc.vdesc_offset].  For a
 * non-thread-safe FS only the kernel funnel is taken (no fsnode lock —
 * the vnode is being torn down).
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4401 struct vnop_reclaim_args
{
4402 struct vnodeop_desc
*a_desc
;
4404 vfs_context_t a_context
;
4408 VNOP_RECLAIM(struct vnode
*vp
, vfs_context_t ctx
)
4411 struct vnop_reclaim_args a
;
4413 int funnel_state
= 0;
4415 a
.a_desc
= &vnop_reclaim_desc
;
4418 thread_safe
= THREAD_SAFE_FS(vp
);
4421 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4423 _err
= (*vp
->v_op
[vnop_reclaim_desc
.vdesc_offset
])(&a
);
4425 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4432 * Returns: 0 Success
4433 * lock_fsnode:ENOENT No such file or directory [only for VFS
4434 * that is not thread safe & vnode is
4435 * currently being/has been terminated]
4436 * <vnop_pathconf_desc>:??? [per FS implementation specific]
4441 *#% pathconf vp L L L
/*
 * VNOP_PATHCONF: forward a pathconf query (name -> *retval) to the
 * filesystem through vp->v_op[vnop_pathconf_desc.vdesc_offset], bracketed
 * by lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4444 struct vnop_pathconf_args
{
4445 struct vnodeop_desc
*a_desc
;
4448 register_t
*a_retval
;
4449 vfs_context_t a_context
;
4453 VNOP_PATHCONF(struct vnode
*vp
, int name
, register_t
*retval
, vfs_context_t ctx
)
4456 struct vnop_pathconf_args a
;
4458 int funnel_state
= 0;
4460 a
.a_desc
= &vnop_pathconf_desc
;
4463 a
.a_retval
= retval
;
4465 thread_safe
= THREAD_SAFE_FS(vp
);
4468 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4472 _err
= (*vp
->v_op
[vnop_pathconf_desc
.vdesc_offset
])(&a
);
4474 unlock_fsnode(vp
, &funnel_state
);
4480 * Returns: 0 Success
4481 * err_advlock:ENOTSUP
4483 * <vnop_advlock_desc>:???
4485 * Notes: VFS implementations of advisory locking using calls through
4486 * <vnop_advlock_desc> because lock enforcement does not occur
4487 * locally should try to limit themselves to the return codes
4488 * documented above for lf_advlock and err_advlock.
4493 *#% advlock vp U U U
/*
 * VNOP_ADVLOCK: advisory byte-range locking.  Three dispatch paths:
 *  - FIFOs (vnode_isfifo): rejected via err_advlock (non-seekable);
 *  - vnodes flagged VLOCKLOCAL: handled locally via lf_advlock;
 *  - otherwise: forwarded to the FS through
 *    vp->v_op[vnop_advlock_desc.vdesc_offset].
 * For a non-thread-safe FS the kernel funnel is taken around the dispatch.
 * The uthread is fetched via get_bsdthread_info(current_thread()); its use
 * is among the lines lost in extraction (TODO confirm against original).
 * NOTE(review): lossy extraction — code text left byte-identical.
 */
4496 struct vnop_advlock_args
{
4497 struct vnodeop_desc
*a_desc
;
4503 vfs_context_t a_context
;
4507 VNOP_ADVLOCK(struct vnode
*vp
, caddr_t id
, int op
, struct flock
*fl
, int flags
, vfs_context_t ctx
)
4510 struct vnop_advlock_args a
;
4512 int funnel_state
= 0;
4513 struct uthread
* uth
;
4515 a
.a_desc
= &vnop_advlock_desc
;
4522 thread_safe
= THREAD_SAFE_FS(vp
);
4524 uth
= get_bsdthread_info(current_thread());
4526 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4528 /* Disallow advisory locking on non-seekable vnodes */
4529 if (vnode_isfifo(vp
)) {
4530 _err
= err_advlock(&a
);
4532 if ((vp
->v_flag
& VLOCKLOCAL
)) {
4533 /* Advisory locking done at this layer */
4534 _err
= lf_advlock(&a
);
4536 /* Advisory locking done by underlying filesystem */
4537 _err
= (*vp
->v_op
[vnop_advlock_desc
.vdesc_offset
])(&a
);
4541 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4551 *#% allocate vp L L L
/*
 * VNOP_ALLOCATE: forward a space-allocation request (length, flags, offset;
 * actual bytes reported via *bytesallocated) to the filesystem through
 * vp->v_op[vnop_allocate_desc.vdesc_offset], bracketed by
 * lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4554 struct vnop_allocate_args
{
4555 struct vnodeop_desc
*a_desc
;
4559 off_t
*a_bytesallocated
;
4561 vfs_context_t a_context
;
4566 VNOP_ALLOCATE(struct vnode
*vp
, off_t length
, u_int32_t flags
, off_t
*bytesallocated
, off_t offset
, vfs_context_t ctx
)
4569 struct vnop_allocate_args a
;
4571 int funnel_state
= 0;
4573 a
.a_desc
= &vnop_allocate_desc
;
4575 a
.a_length
= length
;
4577 a
.a_bytesallocated
= bytesallocated
;
4578 a
.a_offset
= offset
;
4580 thread_safe
= THREAD_SAFE_FS(vp
);
4583 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4587 _err
= (*vp
->v_op
[vnop_allocate_desc
.vdesc_offset
])(&a
);
4589 unlock_fsnode(vp
, &funnel_state
);
/*
 * VNOP_PAGEIN: forward a page-in request (upl, pl_offset, f_offset, size,
 * flags) to the filesystem through vp->v_op[vnop_pagein_desc.vdesc_offset].
 * For a non-thread-safe FS only the kernel funnel is taken (no fsnode
 * lock) around the dispatch.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4600 struct vnop_pagein_args
{
4601 struct vnodeop_desc
*a_desc
;
4604 vm_offset_t a_pl_offset
;
4608 vfs_context_t a_context
;
4612 VNOP_PAGEIN(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
4615 struct vnop_pagein_args a
;
4617 int funnel_state
= 0;
4619 a
.a_desc
= &vnop_pagein_desc
;
4622 a
.a_pl_offset
= pl_offset
;
4623 a
.a_f_offset
= f_offset
;
4627 thread_safe
= THREAD_SAFE_FS(vp
);
4630 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4632 _err
= (*vp
->v_op
[vnop_pagein_desc
.vdesc_offset
])(&a
);
4634 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4642 *#% pageout vp = = =
/*
 * VNOP_PAGEOUT: forward a page-out request (upl, pl_offset, f_offset,
 * size, flags) to the filesystem through
 * vp->v_op[vnop_pageout_desc.vdesc_offset].  For a non-thread-safe FS only
 * the kernel funnel is taken (no fsnode lock) around the dispatch.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4645 struct vnop_pageout_args
{
4646 struct vnodeop_desc
*a_desc
;
4649 vm_offset_t a_pl_offset
;
4653 vfs_context_t a_context
;
4658 VNOP_PAGEOUT(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
4661 struct vnop_pageout_args a
;
4663 int funnel_state
= 0;
4665 a
.a_desc
= &vnop_pageout_desc
;
4668 a
.a_pl_offset
= pl_offset
;
4669 a
.a_f_offset
= f_offset
;
4673 thread_safe
= THREAD_SAFE_FS(vp
);
4676 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4678 _err
= (*vp
->v_op
[vnop_pageout_desc
.vdesc_offset
])(&a
);
4680 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4689 *#% searchfs vp L L L
/*
 * VNOP_SEARCHFS: forward a catalog-search request (search parameters,
 * attribute lists, match limits, script code, options, uio, searchstate)
 * to the filesystem through vp->v_op[vnop_searchfs_desc.vdesc_offset],
 * bracketed by lock_fsnode/unlock_fsnode for non-thread-safe filesystems.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4692 struct vnop_searchfs_args
{
4693 struct vnodeop_desc
*a_desc
;
4695 void *a_searchparams1
;
4696 void *a_searchparams2
;
4697 struct attrlist
*a_searchattrs
;
4698 u_long a_maxmatches
;
4699 struct timeval
*a_timelimit
;
4700 struct attrlist
*a_returnattrs
;
4701 u_long
*a_nummatches
;
4702 u_long a_scriptcode
;
4705 struct searchstate
*a_searchstate
;
4706 vfs_context_t a_context
;
4711 VNOP_SEARCHFS(struct vnode
*vp
, void *searchparams1
, void *searchparams2
, struct attrlist
*searchattrs
, u_long maxmatches
, struct timeval
*timelimit
, struct attrlist
*returnattrs
, u_long
*nummatches
, u_long scriptcode
, u_long options
, struct uio
*uio
, struct searchstate
*searchstate
, vfs_context_t ctx
)
4714 struct vnop_searchfs_args a
;
4716 int funnel_state
= 0;
4718 a
.a_desc
= &vnop_searchfs_desc
;
4720 a
.a_searchparams1
= searchparams1
;
4721 a
.a_searchparams2
= searchparams2
;
4722 a
.a_searchattrs
= searchattrs
;
4723 a
.a_maxmatches
= maxmatches
;
4724 a
.a_timelimit
= timelimit
;
4725 a
.a_returnattrs
= returnattrs
;
4726 a
.a_nummatches
= nummatches
;
4727 a
.a_scriptcode
= scriptcode
;
4728 a
.a_options
= options
;
4730 a
.a_searchstate
= searchstate
;
4732 thread_safe
= THREAD_SAFE_FS(vp
);
4735 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4739 _err
= (*vp
->v_op
[vnop_searchfs_desc
.vdesc_offset
])(&a
);
4741 unlock_fsnode(vp
, &funnel_state
);
4749 *#% copyfile fvp U U U
4750 *#% copyfile tdvp L U U
4751 *#% copyfile tvp X U U
/*
 * VNOP_COPYFILE: forward a copyfile operation (fvp -> tdvp/tvp/tcnp with
 * mode and flags) to the filesystem through
 * fvp->v_op[vnop_copyfile_desc.vdesc_offset].  No funnel/fsnode locking is
 * visible in this extract — presumably copyfile is dispatched unlocked;
 * TODO confirm against the original source.
 * NOTE(review): lossy extraction — some original lines missing; code text
 * left byte-identical.
 */
4754 struct vnop_copyfile_args
{
4755 struct vnodeop_desc
*a_desc
;
4759 struct componentname
*a_tcnp
;
4762 vfs_context_t a_context
;
4766 VNOP_COPYFILE(struct vnode
*fvp
, struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
4767 int mode
, int flags
, vfs_context_t ctx
)
4770 struct vnop_copyfile_args a
;
4771 a
.a_desc
= &vnop_copyfile_desc
;
4779 _err
= (*fvp
->v_op
[vnop_copyfile_desc
.vdesc_offset
])(&a
);
4784 VNOP_GETXATTR(vnode_t vp
, const char *name
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
4786 struct vnop_getxattr_args a
;
4789 int funnel_state
= 0;
4791 a
.a_desc
= &vnop_getxattr_desc
;
4796 a
.a_options
= options
;
4799 thread_safe
= THREAD_SAFE_FS(vp
);
4801 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4805 error
= (*vp
->v_op
[vnop_getxattr_desc
.vdesc_offset
])(&a
);
4807 unlock_fsnode(vp
, &funnel_state
);
4813 VNOP_SETXATTR(vnode_t vp
, const char *name
, uio_t uio
, int options
, vfs_context_t ctx
)
4815 struct vnop_setxattr_args a
;
4818 int funnel_state
= 0;
4820 a
.a_desc
= &vnop_setxattr_desc
;
4824 a
.a_options
= options
;
4827 thread_safe
= THREAD_SAFE_FS(vp
);
4829 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4833 error
= (*vp
->v_op
[vnop_setxattr_desc
.vdesc_offset
])(&a
);
4835 unlock_fsnode(vp
, &funnel_state
);
4838 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
4843 VNOP_REMOVEXATTR(vnode_t vp
, const char *name
, int options
, vfs_context_t ctx
)
4845 struct vnop_removexattr_args a
;
4848 int funnel_state
= 0;
4850 a
.a_desc
= &vnop_removexattr_desc
;
4853 a
.a_options
= options
;
4856 thread_safe
= THREAD_SAFE_FS(vp
);
4858 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4862 error
= (*vp
->v_op
[vnop_removexattr_desc
.vdesc_offset
])(&a
);
4864 unlock_fsnode(vp
, &funnel_state
);
4870 VNOP_LISTXATTR(vnode_t vp
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
4872 struct vnop_listxattr_args a
;
4875 int funnel_state
= 0;
4877 a
.a_desc
= &vnop_listxattr_desc
;
4881 a
.a_options
= options
;
4884 thread_safe
= THREAD_SAFE_FS(vp
);
4886 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4890 error
= (*vp
->v_op
[vnop_listxattr_desc
.vdesc_offset
])(&a
);
4892 unlock_fsnode(vp
, &funnel_state
);
4901 *#% blktooff vp = = =
4904 struct vnop_blktooff_args
{
4905 struct vnodeop_desc
*a_desc
;
4912 VNOP_BLKTOOFF(struct vnode
*vp
, daddr64_t lblkno
, off_t
*offset
)
4915 struct vnop_blktooff_args a
;
4917 int funnel_state
= 0;
4919 a
.a_desc
= &vnop_blktooff_desc
;
4921 a
.a_lblkno
= lblkno
;
4922 a
.a_offset
= offset
;
4923 thread_safe
= THREAD_SAFE_FS(vp
);
4926 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4928 _err
= (*vp
->v_op
[vnop_blktooff_desc
.vdesc_offset
])(&a
);
4930 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4938 *#% offtoblk vp = = =
4941 struct vnop_offtoblk_args
{
4942 struct vnodeop_desc
*a_desc
;
4945 daddr64_t
*a_lblkno
;
4949 VNOP_OFFTOBLK(struct vnode
*vp
, off_t offset
, daddr64_t
*lblkno
)
4952 struct vnop_offtoblk_args a
;
4954 int funnel_state
= 0;
4956 a
.a_desc
= &vnop_offtoblk_desc
;
4958 a
.a_offset
= offset
;
4959 a
.a_lblkno
= lblkno
;
4960 thread_safe
= THREAD_SAFE_FS(vp
);
4963 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4965 _err
= (*vp
->v_op
[vnop_offtoblk_desc
.vdesc_offset
])(&a
);
4967 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4975 *#% blockmap vp L L L
4978 struct vnop_blockmap_args
{
4979 struct vnodeop_desc
*a_desc
;
4987 vfs_context_t a_context
;
4991 VNOP_BLOCKMAP(struct vnode
*vp
, off_t foffset
, size_t size
, daddr64_t
*bpn
, size_t *run
, void *poff
, int flags
, vfs_context_t ctx
)
4994 struct vnop_blockmap_args a
;
4996 int funnel_state
= 0;
4999 ctx
= vfs_context_current();
5001 a
.a_desc
= &vnop_blockmap_desc
;
5003 a
.a_foffset
= foffset
;
5010 thread_safe
= THREAD_SAFE_FS(vp
);
5013 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5015 _err
= (*vp
->v_op
[vnop_blockmap_desc
.vdesc_offset
])(&a
);
5017 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * Argument block for the strategy VNOP (issue buffer I/O).
 */
struct vnop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct buf *a_bp;
};
5030 VNOP_STRATEGY(struct buf
*bp
)
5033 struct vnop_strategy_args a
;
5034 a
.a_desc
= &vnop_strategy_desc
;
5036 _err
= (*buf_vnode(bp
)->v_op
[vnop_strategy_desc
.vdesc_offset
])(&a
);
5041 struct vnop_bwrite_args
{
5042 struct vnodeop_desc
*a_desc
;
5047 VNOP_BWRITE(struct buf
*bp
)
5050 struct vnop_bwrite_args a
;
5051 a
.a_desc
= &vnop_bwrite_desc
;
5053 _err
= (*buf_vnode(bp
)->v_op
[vnop_bwrite_desc
.vdesc_offset
])(&a
);
5058 struct vnop_kqfilt_add_args
{
5059 struct vnodeop_desc
*a_desc
;
5062 vfs_context_t a_context
;
5066 VNOP_KQFILT_ADD(struct vnode
*vp
, struct knote
*kn
, vfs_context_t ctx
)
5069 struct vnop_kqfilt_add_args a
;
5071 int funnel_state
= 0;
5073 a
.a_desc
= VDESC(vnop_kqfilt_add
);
5077 thread_safe
= THREAD_SAFE_FS(vp
);
5080 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5084 _err
= (*vp
->v_op
[vnop_kqfilt_add_desc
.vdesc_offset
])(&a
);
5086 unlock_fsnode(vp
, &funnel_state
);
5092 struct vnop_kqfilt_remove_args
{
5093 struct vnodeop_desc
*a_desc
;
5096 vfs_context_t a_context
;
5100 VNOP_KQFILT_REMOVE(struct vnode
*vp
, uintptr_t ident
, vfs_context_t ctx
)
5103 struct vnop_kqfilt_remove_args a
;
5105 int funnel_state
= 0;
5107 a
.a_desc
= VDESC(vnop_kqfilt_remove
);
5111 thread_safe
= THREAD_SAFE_FS(vp
);
5114 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5118 _err
= (*vp
->v_op
[vnop_kqfilt_remove_desc
.vdesc_offset
])(&a
);
5120 unlock_fsnode(vp
, &funnel_state
);
5126 struct vnop_setlabel_args
{
5127 struct vnodeop_desc
*a_desc
;
5130 vfs_context_t a_context
;
5134 VNOP_SETLABEL(struct vnode
*vp
, struct label
*label
, vfs_context_t ctx
)
5137 struct vnop_setlabel_args a
;
5139 int funnel_state
= 0;
5141 a
.a_desc
= VDESC(vnop_setlabel
);
5145 thread_safe
= THREAD_SAFE_FS(vp
);
5148 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5152 _err
= (*vp
->v_op
[vnop_setlabel_desc
.vdesc_offset
])(&a
);
5154 unlock_fsnode(vp
, &funnel_state
);
5162 * Get a named streamed
5165 VNOP_GETNAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, enum nsoperation operation
, int flags
, vfs_context_t ctx
)
5167 struct vnop_getnamedstream_args a
;
5169 if (!THREAD_SAFE_FS(vp
))
5171 a
.a_desc
= &vnop_getnamedstream_desc
;
5175 a
.a_operation
= operation
;
5179 return (*vp
->v_op
[vnop_getnamedstream_desc
.vdesc_offset
])(&a
);
5183 * Create a named streamed
5186 VNOP_MAKENAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, int flags
, vfs_context_t ctx
)
5188 struct vnop_makenamedstream_args a
;
5190 if (!THREAD_SAFE_FS(vp
))
5192 a
.a_desc
= &vnop_makenamedstream_desc
;
5199 return (*vp
->v_op
[vnop_makenamedstream_desc
.vdesc_offset
])(&a
);
5204 * Remove a named streamed
5207 VNOP_REMOVENAMEDSTREAM(vnode_t vp
, vnode_t svp
, const char *name
, int flags
, vfs_context_t ctx
)
5209 struct vnop_removenamedstream_args a
;
5211 if (!THREAD_SAFE_FS(vp
))
5213 a
.a_desc
= &vnop_removenamedstream_desc
;
5220 return (*vp
->v_op
[vnop_removenamedstream_desc
.vdesc_offset
])(&a
);