2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
76 * External virtual filesystem routines
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
87 #include <sys/vnode_internal.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
96 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/fsevents.h>
102 #include <sys/user.h>
103 #include <sys/lockf.h>
104 #include <sys/xattr.h>
106 #include <kern/assert.h>
107 #include <kern/kalloc.h>
108 #include <kern/task.h>
110 #include <libkern/OSByteOrder.h>
112 #include <miscfs/specfs/specdev.h>
114 #include <mach/mach_types.h>
115 #include <mach/memory_object_types.h>
116 #include <mach/task.h>
119 #include <security/mac_framework.h>
/*
 * A filesystem is treated as thread safe unless the vnode carries a
 * v_unsafefs (funnel bookkeeping) structure.
 */
#define THREAD_SAFE_FS(VP)	\
	((VP)->v_unsafefs ? 0 : 1)

/*
 * Does the mount backing this vnode support extended attributes
 * natively?  Evaluates to 0 when the vnode has no mount.
 */
#define NATIVE_XATTR(VP)	\
	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
135 static void xattrfile_remove(vnode_t dvp
, const char *basename
,
136 vfs_context_t ctx
, int thread_safe
, int force
);
137 static void xattrfile_setattr(vnode_t dvp
, const char * basename
,
138 struct vnode_attr
* vap
, vfs_context_t ctx
,
143 vnode_setneedinactive(vnode_t vp
)
148 vp
->v_lflag
|= VL_NEEDINACTIVE
;
154 lock_fsnode(vnode_t vp
, int *funnel_state
)
157 *funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
159 if (vp
->v_unsafefs
) {
160 if (vp
->v_unsafefs
->fsnodeowner
== current_thread()) {
161 vp
->v_unsafefs
->fsnode_count
++;
163 lck_mtx_lock(&vp
->v_unsafefs
->fsnodelock
);
165 if (vp
->v_lflag
& (VL_TERMWANT
| VL_TERMINATE
| VL_DEAD
)) {
166 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
169 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
172 vp
->v_unsafefs
->fsnodeowner
= current_thread();
173 vp
->v_unsafefs
->fsnode_count
= 1;
181 unlock_fsnode(vnode_t vp
, int *funnel_state
)
183 if (vp
->v_unsafefs
) {
184 if (--vp
->v_unsafefs
->fsnode_count
== 0) {
185 vp
->v_unsafefs
->fsnodeowner
= NULL
;
186 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
190 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
/* ====================================================================== */
/* *********************** EXTERNAL KERNEL APIS ************************* */
/* ====================================================================== */

/*
 * prototypes for exported VFS operations
 */
203 VFS_MOUNT(mount_t mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t ctx
)
207 int funnel_state
= 0;
209 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_mount
== 0))
212 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
216 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
219 if (vfs_context_is64bit(ctx
)) {
220 if (vfs_64bitready(mp
)) {
221 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
228 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
232 (void) thread_funnel_set(kernel_flock
, funnel_state
);
238 VFS_START(mount_t mp
, int flags
, vfs_context_t ctx
)
242 int funnel_state
= 0;
244 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_start
== 0))
247 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
250 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
252 error
= (*mp
->mnt_op
->vfs_start
)(mp
, flags
, ctx
);
254 (void) thread_funnel_set(kernel_flock
, funnel_state
);
260 VFS_UNMOUNT(mount_t mp
, int flags
, vfs_context_t ctx
)
264 int funnel_state
= 0;
266 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_unmount
== 0))
269 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
272 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
274 error
= (*mp
->mnt_op
->vfs_unmount
)(mp
, flags
, ctx
);
276 (void) thread_funnel_set(kernel_flock
, funnel_state
);
283 * ENOTSUP Not supported
287 * Note: The return codes from the underlying VFS's root routine can't
288 * be fully enumerated here, since third party VFS authors may not
289 * limit their error returns to the ones documented here, even
290 * though this may result in some programs functioning incorrectly.
292 * The return codes documented above are those which may currently
293 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
294 * for a call to hfs_vget on the volume mount poit, not including
295 * additional error codes which may be propagated from underlying
296 * routines called by hfs_vget.
299 VFS_ROOT(mount_t mp
, struct vnode
** vpp
, vfs_context_t ctx
)
303 int funnel_state
= 0;
305 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_root
== 0))
309 ctx
= vfs_context_current();
311 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
314 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
316 error
= (*mp
->mnt_op
->vfs_root
)(mp
, vpp
, ctx
);
318 (void) thread_funnel_set(kernel_flock
, funnel_state
);
324 VFS_QUOTACTL(mount_t mp
, int cmd
, uid_t uid
, caddr_t datap
, vfs_context_t ctx
)
328 int funnel_state
= 0;
330 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_quotactl
== 0))
333 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
336 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
338 error
= (*mp
->mnt_op
->vfs_quotactl
)(mp
, cmd
, uid
, datap
, ctx
);
340 (void) thread_funnel_set(kernel_flock
, funnel_state
);
346 VFS_GETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
350 int funnel_state
= 0;
352 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_getattr
== 0))
356 ctx
= vfs_context_current();
359 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
362 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
364 error
= (*mp
->mnt_op
->vfs_getattr
)(mp
, vfa
, ctx
);
366 (void) thread_funnel_set(kernel_flock
, funnel_state
);
372 VFS_SETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
376 int funnel_state
= 0;
378 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_setattr
== 0))
382 ctx
= vfs_context_current();
385 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
388 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
390 error
= (*mp
->mnt_op
->vfs_setattr
)(mp
, vfa
, ctx
);
392 (void) thread_funnel_set(kernel_flock
, funnel_state
);
398 VFS_SYNC(mount_t mp
, int flags
, vfs_context_t ctx
)
402 int funnel_state
= 0;
404 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_sync
== 0))
408 ctx
= vfs_context_current();
410 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
413 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
415 error
= (*mp
->mnt_op
->vfs_sync
)(mp
, flags
, ctx
);
417 (void) thread_funnel_set(kernel_flock
, funnel_state
);
423 VFS_VGET(mount_t mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t ctx
)
427 int funnel_state
= 0;
429 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_vget
== 0))
433 ctx
= vfs_context_current();
435 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
438 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
440 error
= (*mp
->mnt_op
->vfs_vget
)(mp
, ino
, vpp
, ctx
);
442 (void) thread_funnel_set(kernel_flock
, funnel_state
);
448 VFS_FHTOVP(mount_t mp
, int fhlen
, unsigned char * fhp
, vnode_t
* vpp
, vfs_context_t ctx
)
452 int funnel_state
= 0;
454 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_fhtovp
== 0))
458 ctx
= vfs_context_current();
460 thread_safe
= mp
->mnt_vtable
->vfc_threadsafe
;
463 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
465 error
= (*mp
->mnt_op
->vfs_fhtovp
)(mp
, fhlen
, fhp
, vpp
, ctx
);
467 (void) thread_funnel_set(kernel_flock
, funnel_state
);
473 VFS_VPTOFH(struct vnode
* vp
, int *fhlenp
, unsigned char * fhp
, vfs_context_t ctx
)
477 int funnel_state
= 0;
479 if ((vp
->v_mount
== dead_mountp
) || (vp
->v_mount
->mnt_op
->vfs_vptofh
== 0))
483 ctx
= vfs_context_current();
485 thread_safe
= THREAD_SAFE_FS(vp
);
488 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
490 error
= (*vp
->v_mount
->mnt_op
->vfs_vptofh
)(vp
, fhlenp
, fhp
, ctx
);
492 (void) thread_funnel_set(kernel_flock
, funnel_state
);
498 /* returns a copy of vfs type name for the mount_t */
500 vfs_name(mount_t mp
, char * buffer
)
502 strncpy(buffer
, mp
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
505 /* returns vfs type number for the mount_t */
507 vfs_typenum(mount_t mp
)
509 return(mp
->mnt_vtable
->vfc_typenum
);
513 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
515 vfs_flags(mount_t mp
)
517 return((uint64_t)(mp
->mnt_flag
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
)));
520 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
522 vfs_setflags(mount_t mp
, uint64_t flags
)
524 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
527 mp
->mnt_flag
|= lflags
;
531 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
533 vfs_clearflags(mount_t mp
, uint64_t flags
)
535 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
538 mp
->mnt_flag
&= ~lflags
;
542 /* Is the mount_t ronly and upgrade read/write requested? */
544 vfs_iswriteupgrade(mount_t mp
) /* ronly && MNTK_WANTRDWR */
546 return ((mp
->mnt_flag
& MNT_RDONLY
) && (mp
->mnt_kern_flag
& MNTK_WANTRDWR
));
550 /* Is the mount_t mounted ronly */
552 vfs_isrdonly(mount_t mp
)
554 return (mp
->mnt_flag
& MNT_RDONLY
);
557 /* Is the mount_t mounted for filesystem synchronous writes? */
559 vfs_issynchronous(mount_t mp
)
561 return (mp
->mnt_flag
& MNT_SYNCHRONOUS
);
564 /* Is the mount_t mounted read/write? */
566 vfs_isrdwr(mount_t mp
)
568 return ((mp
->mnt_flag
& MNT_RDONLY
) == 0);
572 /* Is mount_t marked for update (ie MNT_UPDATE) */
574 vfs_isupdate(mount_t mp
)
576 return (mp
->mnt_flag
& MNT_UPDATE
);
580 /* Is mount_t marked for reload (ie MNT_RELOAD) */
582 vfs_isreload(mount_t mp
)
584 return ((mp
->mnt_flag
& MNT_UPDATE
) && (mp
->mnt_flag
& MNT_RELOAD
));
587 /* Is mount_t marked for reload (ie MNT_FORCE) */
589 vfs_isforce(mount_t mp
)
591 if ((mp
->mnt_lflag
& MNT_LFORCE
) || (mp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
))
598 vfs_64bitready(mount_t mp
)
600 if ((mp
->mnt_vtable
->vfc_64bitready
))
608 vfs_authcache_ttl(mount_t mp
)
610 if ( (mp
->mnt_kern_flag
& (MNTK_AUTH_OPAQUE
| MNTK_AUTH_CACHE_TTL
)) )
611 return (mp
->mnt_authcache_ttl
);
613 return (CACHED_RIGHT_INFINITE_TTL
);
617 vfs_setauthcache_ttl(mount_t mp
, int ttl
)
620 mp
->mnt_kern_flag
|= MNTK_AUTH_CACHE_TTL
;
621 mp
->mnt_authcache_ttl
= ttl
;
626 vfs_clearauthcache_ttl(mount_t mp
)
629 mp
->mnt_kern_flag
&= ~MNTK_AUTH_CACHE_TTL
;
631 * back to the default TTL value in case
632 * MNTK_AUTH_OPAQUE is set on this mount
634 mp
->mnt_authcache_ttl
= CACHED_LOOKUP_RIGHT_TTL
;
639 vfs_markdependency(mount_t mp
)
641 proc_t p
= current_proc();
643 mp
->mnt_dependent_process
= p
;
644 mp
->mnt_dependent_pid
= proc_pid(p
);
650 vfs_authopaque(mount_t mp
)
652 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
))
659 vfs_authopaqueaccess(mount_t mp
)
661 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE_ACCESS
))
668 vfs_setauthopaque(mount_t mp
)
671 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE
;
676 vfs_setauthopaqueaccess(mount_t mp
)
679 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE_ACCESS
;
684 vfs_clearauthopaque(mount_t mp
)
687 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE
;
692 vfs_clearauthopaqueaccess(mount_t mp
)
695 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE_ACCESS
;
700 vfs_setextendedsecurity(mount_t mp
)
703 mp
->mnt_kern_flag
|= MNTK_EXTENDED_SECURITY
;
708 vfs_clearextendedsecurity(mount_t mp
)
711 mp
->mnt_kern_flag
&= ~MNTK_EXTENDED_SECURITY
;
716 vfs_extendedsecurity(mount_t mp
)
718 return(mp
->mnt_kern_flag
& MNTK_EXTENDED_SECURITY
);
721 /* returns the max size of short symlink in this mount_t */
723 vfs_maxsymlen(mount_t mp
)
725 return(mp
->mnt_maxsymlinklen
);
728 /* set max size of short symlink on mount_t */
730 vfs_setmaxsymlen(mount_t mp
, uint32_t symlen
)
732 mp
->mnt_maxsymlinklen
= symlen
;
735 /* return a pointer to the RO vfs_statfs associated with mount_t */
737 vfs_statfs(mount_t mp
)
739 return(&mp
->mnt_vfsstat
);
743 vfs_getattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
747 if ((error
= VFS_GETATTR(mp
, vfa
, ctx
)) != 0)
751 * If we have a filesystem create time, use it to default some others.
753 if (VFSATTR_IS_SUPPORTED(vfa
, f_create_time
)) {
754 if (VFSATTR_IS_ACTIVE(vfa
, f_modify_time
) && !VFSATTR_IS_SUPPORTED(vfa
, f_modify_time
))
755 VFSATTR_RETURN(vfa
, f_modify_time
, vfa
->f_create_time
);
762 vfs_setattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
766 if (vfs_isrdonly(mp
))
769 error
= VFS_SETATTR(mp
, vfa
, ctx
);
772 * If we had alternate ways of setting vfs attributes, we'd
779 /* return the private data handle stored in mount_t */
781 vfs_fsprivate(mount_t mp
)
783 return(mp
->mnt_data
);
786 /* set the private data handle in mount_t */
788 vfs_setfsprivate(mount_t mp
, void *mntdata
)
791 mp
->mnt_data
= mntdata
;
797 * return the block size of the underlying
798 * device associated with mount_t
801 vfs_devblocksize(mount_t mp
) {
803 return(mp
->mnt_devblocksize
);
808 * return the io attributes associated with mount_t
811 vfs_ioattr(mount_t mp
, struct vfsioattr
*ioattrp
)
814 ioattrp
->io_maxreadcnt
= MAXPHYS
;
815 ioattrp
->io_maxwritecnt
= MAXPHYS
;
816 ioattrp
->io_segreadcnt
= 32;
817 ioattrp
->io_segwritecnt
= 32;
818 ioattrp
->io_maxsegreadsize
= MAXPHYS
;
819 ioattrp
->io_maxsegwritesize
= MAXPHYS
;
820 ioattrp
->io_devblocksize
= DEV_BSIZE
;
821 ioattrp
->io_flags
= 0;
823 ioattrp
->io_maxreadcnt
= mp
->mnt_maxreadcnt
;
824 ioattrp
->io_maxwritecnt
= mp
->mnt_maxwritecnt
;
825 ioattrp
->io_segreadcnt
= mp
->mnt_segreadcnt
;
826 ioattrp
->io_segwritecnt
= mp
->mnt_segwritecnt
;
827 ioattrp
->io_maxsegreadsize
= mp
->mnt_maxsegreadsize
;
828 ioattrp
->io_maxsegwritesize
= mp
->mnt_maxsegwritesize
;
829 ioattrp
->io_devblocksize
= mp
->mnt_devblocksize
;
830 ioattrp
->io_flags
= mp
->mnt_ioflags
;
832 ioattrp
->io_reserved
[0] = NULL
;
833 ioattrp
->io_reserved
[1] = NULL
;
838 * set the IO attributes associated with mount_t
841 vfs_setioattr(mount_t mp
, struct vfsioattr
* ioattrp
)
845 mp
->mnt_maxreadcnt
= ioattrp
->io_maxreadcnt
;
846 mp
->mnt_maxwritecnt
= ioattrp
->io_maxwritecnt
;
847 mp
->mnt_segreadcnt
= ioattrp
->io_segreadcnt
;
848 mp
->mnt_segwritecnt
= ioattrp
->io_segwritecnt
;
849 mp
->mnt_maxsegreadsize
= ioattrp
->io_maxsegreadsize
;
850 mp
->mnt_maxsegwritesize
= ioattrp
->io_maxsegwritesize
;
851 mp
->mnt_devblocksize
= ioattrp
->io_devblocksize
;
852 mp
->mnt_ioflags
= ioattrp
->io_flags
;
/*
 * Add a new filesystem into the kernel specified in passed in
 * vfstable structure. It fills in the vnode
 * dispatch vector that is to be passed to when vnodes are created.
 * It returns a handle which is to be used to when the FS is to be removed
 */
typedef int (*PFI)(void *);	/* generic vnode-operation function pointer */
extern int vfs_opv_numops;	/* number of slots in each operations vector */
864 vfs_fsadd(struct vfs_fsentry
*vfe
, vfstable_t
* handle
)
867 struct vfstable
*newvfstbl
= NULL
;
869 int (***opv_desc_vector_p
)(void *);
870 int (**opv_desc_vector
)(void *);
871 struct vnodeopv_entry_desc
*opve_descp
;
877 * This routine is responsible for all the initialization that would
878 * ordinarily be done as part of the system startup;
881 if (vfe
== (struct vfs_fsentry
*)0)
884 desccount
= vfe
->vfe_vopcnt
;
885 if ((desccount
<=0) || ((desccount
> 5)) || (vfe
->vfe_vfsops
== (struct vfsops
*)NULL
)
886 || (vfe
->vfe_opvdescs
== (struct vnodeopv_desc
**)NULL
))
890 MALLOC(newvfstbl
, void *, sizeof(struct vfstable
), M_TEMP
,
892 bzero(newvfstbl
, sizeof(struct vfstable
));
893 newvfstbl
->vfc_vfsops
= vfe
->vfe_vfsops
;
894 strncpy(&newvfstbl
->vfc_name
[0], vfe
->vfe_fsname
, MFSNAMELEN
);
895 if ((vfe
->vfe_flags
& VFS_TBLNOTYPENUM
))
896 newvfstbl
->vfc_typenum
= maxvfsconf
++;
898 newvfstbl
->vfc_typenum
= vfe
->vfe_fstypenum
;
900 newvfstbl
->vfc_refcount
= 0;
901 newvfstbl
->vfc_flags
= 0;
902 newvfstbl
->vfc_mountroot
= NULL
;
903 newvfstbl
->vfc_next
= NULL
;
904 newvfstbl
->vfc_threadsafe
= 0;
905 newvfstbl
->vfc_vfsflags
= 0;
906 if (vfe
->vfe_flags
& VFS_TBL64BITREADY
)
907 newvfstbl
->vfc_64bitready
= 1;
908 if (vfe
->vfe_flags
& VFS_TBLTHREADSAFE
)
909 newvfstbl
->vfc_threadsafe
= 1;
910 if (vfe
->vfe_flags
& VFS_TBLFSNODELOCK
)
911 newvfstbl
->vfc_threadsafe
= 1;
912 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) == VFS_TBLLOCALVOL
)
913 newvfstbl
->vfc_flags
|= MNT_LOCAL
;
914 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) && (vfe
->vfe_flags
& VFS_TBLGENERICMNTARGS
) == 0)
915 newvfstbl
->vfc_vfsflags
|= VFC_VFSLOCALARGS
;
917 newvfstbl
->vfc_vfsflags
|= VFC_VFSGENERICARGS
;
919 if (vfe
->vfe_flags
& VFS_TBLNATIVEXATTR
)
920 newvfstbl
->vfc_vfsflags
|= VFC_VFSNATIVEXATTR
;
921 if (vfe
->vfe_flags
& VFS_TBLUNMOUNT_PREFLIGHT
)
922 newvfstbl
->vfc_vfsflags
|= VFC_VFSPREFLIGHT
;
923 if (vfe
->vfe_flags
& VFS_TBLREADDIR_EXTENDED
)
924 newvfstbl
->vfc_vfsflags
|= VFC_VFSREADDIR_EXTENDED
;
925 if (vfe
->vfe_flags
& VFS_TBLNOMACLABEL
)
926 newvfstbl
->vfc_vfsflags
|= VFC_VFSNOMACLABEL
;
929 * Allocate and init the vectors.
930 * Also handle backwards compatibility.
932 * We allocate one large block to hold all <desccount>
933 * vnode operation vectors stored contiguously.
935 /* XXX - shouldn't be M_TEMP */
937 descsize
= desccount
* vfs_opv_numops
* sizeof(PFI
);
938 MALLOC(descptr
, PFI
*, descsize
,
940 bzero(descptr
, descsize
);
942 newvfstbl
->vfc_descptr
= descptr
;
943 newvfstbl
->vfc_descsize
= descsize
;
946 for (i
= 0; i
< desccount
; i
++ ) {
947 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
949 * Fill in the caller's pointer to the start of the i'th vector.
950 * They'll need to supply it when calling vnode_create.
952 opv_desc_vector
= descptr
+ i
* vfs_opv_numops
;
953 *opv_desc_vector_p
= opv_desc_vector
;
955 for (j
= 0; vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
956 opve_descp
= &(vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
]);
959 * Sanity check: is this operation listed
960 * in the list of operations? We check this
961 * by seeing if its offest is zero. Since
962 * the default routine should always be listed
963 * first, it should be the only one with a zero
964 * offset. Any other operation with a zero
965 * offset is probably not listed in
966 * vfs_op_descs, and so is probably an error.
968 * A panic here means the layer programmer
969 * has committed the all-too common bug
970 * of adding a new operation to the layer's
971 * list of vnode operations but
972 * not adding the operation to the system-wide
973 * list of supported operations.
975 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
976 opve_descp
->opve_op
->vdesc_offset
!= VOFFSET(vnop_default
)) {
977 printf("vfs_fsadd: operation %s not listed in %s.\n",
978 opve_descp
->opve_op
->vdesc_name
,
980 panic("vfs_fsadd: bad operation");
983 * Fill in this entry.
985 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
986 opve_descp
->opve_impl
;
991 * Finally, go back and replace unfilled routines
992 * with their default. (Sigh, an O(n^3) algorithm. I
993 * could make it better, but that'd be work, and n is small.)
995 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
998 * Force every operations vector to have a default routine.
1000 opv_desc_vector
= *opv_desc_vector_p
;
1001 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
)
1002 panic("vfs_fsadd: operation vector without default routine.");
1003 for (j
= 0; j
< vfs_opv_numops
; j
++)
1004 if (opv_desc_vector
[j
] == NULL
)
1005 opv_desc_vector
[j
] =
1006 opv_desc_vector
[VOFFSET(vnop_default
)];
1008 } /* end of each vnodeopv_desc parsing */
1012 *handle
= vfstable_add(newvfstbl
);
1014 if (newvfstbl
->vfc_typenum
<= maxvfsconf
)
1015 maxvfsconf
= newvfstbl
->vfc_typenum
+ 1;
1018 if (newvfstbl
->vfc_vfsops
->vfs_init
)
1019 (*newvfstbl
->vfc_vfsops
->vfs_init
)((struct vfsconf
*)handle
);
1021 FREE(newvfstbl
, M_TEMP
);
1027 * Removes the filesystem from kernel.
1028 * The argument passed in is the handle that was given when
1029 * file system was added
1032 vfs_fsremove(vfstable_t handle
)
1034 struct vfstable
* vfstbl
= (struct vfstable
*)handle
;
1035 void *old_desc
= NULL
;
1038 /* Preflight check for any mounts */
1040 if ( vfstbl
->vfc_refcount
!= 0 ) {
1041 mount_list_unlock();
1044 mount_list_unlock();
1047 * save the old descriptor; the free cannot occur unconditionally,
1048 * since vfstable_del() may fail.
1050 if (vfstbl
->vfc_descptr
&& vfstbl
->vfc_descsize
) {
1051 old_desc
= vfstbl
->vfc_descptr
;
1053 err
= vfstable_del(vfstbl
);
1055 /* free the descriptor if the delete was successful */
1056 if (err
== 0 && old_desc
) {
1057 FREE(old_desc
, M_TEMP
);
1064 * This returns a reference to mount_t
1065 * which should be dropped using vfs_mountrele().
1066 * Not doing so will leak a mountpoint
1067 * and associated data structures.
1070 vfs_mountref(__unused mount_t mp
) /* gives a reference */
1075 /* This drops the reference on mount_t that was acquired */
1077 vfs_mountrele(__unused mount_t mp
) /* drops reference */
1083 vfs_context_pid(vfs_context_t ctx
)
1085 return (proc_pid(vfs_context_proc(ctx
)));
1089 vfs_context_suser(vfs_context_t ctx
)
1091 return (suser(ctx
->vc_ucred
, NULL
));
1095 * XXX Signals should be tied to threads, not processes, for most uses of this
1099 vfs_context_issignal(vfs_context_t ctx
, sigset_t mask
)
1101 proc_t p
= vfs_context_proc(ctx
);
1103 return(proc_pendingsignals(p
, mask
));
1108 vfs_context_is64bit(vfs_context_t ctx
)
1110 proc_t proc
= vfs_context_proc(ctx
);
1113 return(proc_is64bit(proc
));
1121 * Description: Given a vfs_context_t, return the proc_t associated with it.
1123 * Parameters: vfs_context_t The context to use
1125 * Returns: proc_t The process for this context
1127 * Notes: This function will return the current_proc() if any of the
1128 * following conditions are true:
1130 * o The supplied context pointer is NULL
1131 * o There is no Mach thread associated with the context
1132 * o There is no Mach task associated with the Mach thread
1133 * o There is no proc_t associated with the Mach task
1134 * o The proc_t has no per process open file table
1135 * o The proc_t is post-vfork()
1137 * This causes this function to return a value matching as
1138 * closely as possible the previous behaviour, while at the
1139 * same time avoiding the task lending that results from vfork()
1142 vfs_context_proc(vfs_context_t ctx
)
1146 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1147 proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
);
1148 if (proc
!= NULL
&& (proc
->p_fd
== NULL
|| (proc
->p_lflag
& P_LVFORK
)))
1151 return(proc
== NULL
? current_proc() : proc
);
1155 * vfs_context_get_special_port
1157 * Description: Return the requested special port from the task associated
1158 * with the given context.
1160 * Parameters: vfs_context_t The context to use
1161 * int Index of special port
1162 * ipc_port_t * Pointer to returned port
1164 * Returns: kern_return_t see task_get_special_port()
1167 vfs_context_get_special_port(vfs_context_t ctx
, int which
, ipc_port_t
*portp
)
1171 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1172 task
= get_threadtask(ctx
->vc_thread
);
1174 return task_get_special_port(task
, which
, portp
);
1178 * vfs_context_set_special_port
1180 * Description: Set the requested special port in the task associated
1181 * with the given context.
1183 * Parameters: vfs_context_t The context to use
1184 * int Index of special port
1185 * ipc_port_t New special port
1187 * Returns: kern_return_t see task_set_special_port()
1190 vfs_context_set_special_port(vfs_context_t ctx
, int which
, ipc_port_t port
)
1194 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1195 task
= get_threadtask(ctx
->vc_thread
);
1197 return task_set_special_port(task
, which
, port
);
1201 * vfs_context_thread
1203 * Description: Return the Mach thread associated with a vfs_context_t
1205 * Parameters: vfs_context_t The context to use
1207 * Returns: thread_t The thread for this context, or
1208 * NULL, if there is not one.
1210 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1211 * as a result of a static vfs_context_t declaration in a function
1212 * and will result in this function returning NULL.
1214 * This is intentional; this function should NOT return the
1215 * current_thread() in this case.
1218 vfs_context_thread(vfs_context_t ctx
)
1220 return(ctx
->vc_thread
);
1227 * Description: Returns a reference on the vnode for the current working
1228 * directory for the supplied context
1230 * Parameters: vfs_context_t The context to use
1232 * Returns: vnode_t The current working directory
1235 * Notes: The function first attempts to obtain the current directory
1236 * from the thread, and if it is not present there, falls back
1237 * to obtaining it from the process instead. If it can't be
1238 * obtained from either place, we return NULLVP.
1241 vfs_context_cwd(vfs_context_t ctx
)
1243 vnode_t cwd
= NULLVP
;
1245 if(ctx
!= NULL
&& ctx
->vc_thread
!= NULL
) {
1246 uthread_t uth
= get_bsdthread_info(ctx
->vc_thread
);
1250 * Get the cwd from the thread; if there isn't one, get it
1251 * from the process, instead.
1253 if ((cwd
= uth
->uu_cdir
) == NULLVP
&&
1254 (proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
)) != NULL
&&
1256 cwd
= proc
->p_fd
->fd_cdir
;
1264 vfs_context_create(vfs_context_t ctx
)
1266 vfs_context_t newcontext
;
1268 newcontext
= (vfs_context_t
)kalloc(sizeof(struct vfs_context
));
1271 kauth_cred_t safecred
;
1273 newcontext
->vc_thread
= ctx
->vc_thread
;
1274 safecred
= ctx
->vc_ucred
;
1276 newcontext
->vc_thread
= current_thread();
1277 safecred
= kauth_cred_get();
1279 if (IS_VALID_CRED(safecred
))
1280 kauth_cred_ref(safecred
);
1281 newcontext
->vc_ucred
= safecred
;
1289 vfs_context_current(void)
1291 vfs_context_t ctx
= NULL
;
1292 volatile uthread_t ut
= (uthread_t
)get_bsdthread_info(current_thread());
1295 if (ut
->uu_context
.vc_ucred
!= NULL
) {
1296 ctx
= &ut
->uu_context
;
1300 return(ctx
== NULL
? vfs_context_kernel() : ctx
);
1307 * Dangerous hack - adopt the first kernel thread as the current thread, to
1308 * get to the vfs_context_t in the uthread associated with a kernel thread.
1309 * This is used by UDF to make the call into IOCDMediaBSDClient,
1310 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1311 * ioctl() is being called from kernel or user space (and all this because
1312 * we do not pass threads into our ioctl()'s, instead of processes).
1314 * This is also used by imageboot_setup(), called early from bsd_init() after
1315 * kernproc has been given a credential.
1317 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1318 * of many Mach headers to do the reference directly rather than indirectly;
1319 * we will need to forego this convenience when we reture proc_thread().
1321 static struct vfs_context kerncontext
;
1323 vfs_context_kernel(void)
1325 if (kerncontext
.vc_ucred
== NOCRED
)
1326 kerncontext
.vc_ucred
= kernproc
->p_ucred
;
1327 if (kerncontext
.vc_thread
== NULL
)
1328 kerncontext
.vc_thread
= proc_thread(kernproc
);
1330 return(&kerncontext
);
1335 vfs_context_rele(vfs_context_t ctx
)
1338 if (IS_VALID_CRED(ctx
->vc_ucred
))
1339 kauth_cred_unref(&ctx
->vc_ucred
);
1340 kfree(ctx
, sizeof(struct vfs_context
));
1347 vfs_context_ucred(vfs_context_t ctx
)
1349 return (ctx
->vc_ucred
);
1353 * Return true if the context is owned by the superuser.
1356 vfs_context_issuser(vfs_context_t ctx
)
1358 return(kauth_cred_issuser(vfs_context_ucred(ctx
)));
1362 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1366 * Convert between vnode types and inode formats (since POSIX.1
1367 * defines mode word of stat structure in terms of inode formats).
1370 vnode_iftovt(int mode
)
1372 return(iftovt_tab
[((mode
) & S_IFMT
) >> 12]);
1376 vnode_vttoif(enum vtype indx
)
1378 return(vttoif_tab
[(int)(indx
)]);
1382 vnode_makeimode(int indx
, int mode
)
1384 return (int)(VTTOIF(indx
) | (mode
));
1389 * vnode manipulation functions.
1392 /* returns system root vnode reference; It should be dropped using vrele() */
1398 error
= vnode_get(rootvnode
);
1400 return ((vnode_t
)0);
1407 vnode_vid(vnode_t vp
)
1409 return ((uint32_t)(vp
->v_id
));
1412 /* returns a mount reference; drop it with vfs_mountrelease() */
1414 vnode_mount(vnode_t vp
)
1416 return (vp
->v_mount
);
1419 /* returns a mount reference iff vnode_t is a dir and is a mount point */
1421 vnode_mountedhere(vnode_t vp
)
1425 if ((vp
->v_type
== VDIR
) && ((mp
= vp
->v_mountedhere
) != NULL
) &&
1426 (mp
->mnt_vnodecovered
== vp
))
1429 return (mount_t
)NULL
;
1432 /* returns vnode type of vnode_t */
1434 vnode_vtype(vnode_t vp
)
1436 return (vp
->v_type
);
1439 /* returns FS specific node saved in vnode */
1441 vnode_fsnode(vnode_t vp
)
1443 return (vp
->v_data
);
1447 vnode_clearfsnode(vnode_t vp
)
1453 vnode_specrdev(vnode_t vp
)
1459 /* Accessor functions */
1460 /* is vnode_t a root vnode */
1462 vnode_isvroot(vnode_t vp
)
1464 return ((vp
->v_flag
& VROOT
)? 1 : 0);
1467 /* is vnode_t a system vnode */
1469 vnode_issystem(vnode_t vp
)
1471 return ((vp
->v_flag
& VSYSTEM
)? 1 : 0);
1474 /* is vnode_t a swap file vnode */
1476 vnode_isswap(vnode_t vp
)
1478 return ((vp
->v_flag
& VSWAP
)? 1 : 0);
1481 /* if vnode_t mount operation in progress */
1483 vnode_ismount(vnode_t vp
)
1485 return ((vp
->v_flag
& VMOUNT
)? 1 : 0);
1488 /* is this vnode under recyle now */
1490 vnode_isrecycled(vnode_t vp
)
1494 vnode_lock_spin(vp
);
1495 ret
= (vp
->v_lflag
& (VL_TERMINATE
|VL_DEAD
))? 1 : 0;
1500 /* is vnode_t marked to not keep data cached once it's been consumed */
1502 vnode_isnocache(vnode_t vp
)
1504 return ((vp
->v_flag
& VNOCACHE_DATA
)? 1 : 0);
1508 * has sequential readahead been disabled on this vnode
1511 vnode_isnoreadahead(vnode_t vp
)
1513 return ((vp
->v_flag
& VRAOFF
)? 1 : 0);
1517 vnode_is_openevt(vnode_t vp
)
1519 return ((vp
->v_flag
& VOPENEVT
)? 1 : 0);
1522 /* is vnode_t a standard one? */
1524 vnode_isstandard(vnode_t vp
)
1526 return ((vp
->v_flag
& VSTANDARD
)? 1 : 0);
1529 /* don't vflush() if SKIPSYSTEM */
1531 vnode_isnoflush(vnode_t vp
)
1533 return ((vp
->v_flag
& VNOFLUSH
)? 1 : 0);
1536 /* is vnode_t a regular file */
1538 vnode_isreg(vnode_t vp
)
1540 return ((vp
->v_type
== VREG
)? 1 : 0);
1543 /* is vnode_t a directory? */
1545 vnode_isdir(vnode_t vp
)
1547 return ((vp
->v_type
== VDIR
)? 1 : 0);
1550 /* is vnode_t a symbolic link ? */
1552 vnode_islnk(vnode_t vp
)
1554 return ((vp
->v_type
== VLNK
)? 1 : 0);
1557 /* is vnode_t a fifo ? */
1559 vnode_isfifo(vnode_t vp
)
1561 return ((vp
->v_type
== VFIFO
)? 1 : 0);
1564 /* is vnode_t a block device? */
1566 vnode_isblk(vnode_t vp
)
1568 return ((vp
->v_type
== VBLK
)? 1 : 0);
1571 /* is vnode_t a char device? */
1573 vnode_ischr(vnode_t vp
)
1575 return ((vp
->v_type
== VCHR
)? 1 : 0);
1578 /* is vnode_t a socket? */
1580 vnode_issock(vnode_t vp
)
1582 return ((vp
->v_type
== VSOCK
)? 1 : 0);
1585 /* is vnode_t a named stream? */
1587 vnode_isnamedstream(
1596 return ((vp
->v_flag
& VISNAMEDSTREAM
) ? 1 : 0);
1602 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1604 vnode_setnocache(vnode_t vp
)
1606 vnode_lock_spin(vp
);
1607 vp
->v_flag
|= VNOCACHE_DATA
;
1612 vnode_clearnocache(vnode_t vp
)
1614 vnode_lock_spin(vp
);
1615 vp
->v_flag
&= ~VNOCACHE_DATA
;
1620 vnode_set_openevt(vnode_t vp
)
1622 vnode_lock_spin(vp
);
1623 vp
->v_flag
|= VOPENEVT
;
1628 vnode_clear_openevt(vnode_t vp
)
1630 vnode_lock_spin(vp
);
1631 vp
->v_flag
&= ~VOPENEVT
;
1637 vnode_setnoreadahead(vnode_t vp
)
1639 vnode_lock_spin(vp
);
1640 vp
->v_flag
|= VRAOFF
;
1645 vnode_clearnoreadahead(vnode_t vp
)
1647 vnode_lock_spin(vp
);
1648 vp
->v_flag
&= ~VRAOFF
;
1653 /* mark vnode_t to skip vflush() is SKIPSYSTEM */
1655 vnode_setnoflush(vnode_t vp
)
1657 vnode_lock_spin(vp
);
1658 vp
->v_flag
|= VNOFLUSH
;
1663 vnode_clearnoflush(vnode_t vp
)
1665 vnode_lock_spin(vp
);
1666 vp
->v_flag
&= ~VNOFLUSH
;
1671 /* is vnode_t a blkdevice and has a FS mounted on it */
1673 vnode_ismountedon(vnode_t vp
)
1675 return ((vp
->v_specflags
& SI_MOUNTEDON
)? 1 : 0);
1679 vnode_setmountedon(vnode_t vp
)
1681 vnode_lock_spin(vp
);
1682 vp
->v_specflags
|= SI_MOUNTEDON
;
1687 vnode_clearmountedon(vnode_t vp
)
1689 vnode_lock_spin(vp
);
1690 vp
->v_specflags
&= ~SI_MOUNTEDON
;
1696 vnode_settag(vnode_t vp
, int tag
)
1703 vnode_tag(vnode_t vp
)
1709 vnode_parent(vnode_t vp
)
1712 return(vp
->v_parent
);
1716 vnode_setparent(vnode_t vp
, vnode_t dvp
)
1722 vnode_name(vnode_t vp
)
1724 /* we try to keep v_name a reasonable name for the node */
1729 vnode_setname(vnode_t vp
, char * name
)
1734 /* return the registered FS name when adding the FS to kernel */
1736 vnode_vfsname(vnode_t vp
, char * buf
)
1738 strncpy(buf
, vp
->v_mount
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
1741 /* return the FS type number */
1743 vnode_vfstypenum(vnode_t vp
)
1745 return(vp
->v_mount
->mnt_vtable
->vfc_typenum
);
1749 vnode_vfs64bitready(vnode_t vp
)
1752 if ((vp
->v_mount
->mnt_vtable
->vfc_64bitready
))
1760 /* return the visible flags on associated mount point of vnode_t */
1762 vnode_vfsvisflags(vnode_t vp
)
1764 return(vp
->v_mount
->mnt_flag
& MNT_VISFLAGMASK
);
1767 /* return the command modifier flags on associated mount point of vnode_t */
1769 vnode_vfscmdflags(vnode_t vp
)
1771 return(vp
->v_mount
->mnt_flag
& MNT_CMDFLAGS
);
1774 /* return the max symlink of short links of vnode_t */
1776 vnode_vfsmaxsymlen(vnode_t vp
)
1778 return(vp
->v_mount
->mnt_maxsymlinklen
);
1781 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
1783 vnode_vfsstatfs(vnode_t vp
)
1785 return(&vp
->v_mount
->mnt_vfsstat
);
1788 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
1790 vnode_vfsfsprivate(vnode_t vp
)
1792 return(vp
->v_mount
->mnt_data
);
1795 /* is vnode_t in a rdonly mounted FS */
1797 vnode_vfsisrdonly(vnode_t vp
)
1799 return ((vp
->v_mount
->mnt_flag
& MNT_RDONLY
)? 1 : 0);
1804 * Returns vnode ref to current working directory; if a per-thread current
1805 * working directory is in effect, return that instead of the per process one.
1807 * XXX Published, but not used.
1810 current_workingdir(void)
1812 return vfs_context_cwd(vfs_context_current());
1815 /* returns vnode ref to current root(chroot) directory */
1817 current_rootdir(void)
1819 proc_t proc
= current_proc();
1822 if ( (vp
= proc
->p_fd
->fd_rdir
) ) {
1823 if ( (vnode_getwithref(vp
)) )
1830 * Get a filesec and optional acl contents from an extended attribute.
1831 * Function will attempt to retrieve ACL, UUID, and GUID information using a
1832 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
1834 * Parameters: vp The vnode on which to operate.
1835 * fsecp The filesec (and ACL, if any) being
1837 * ctx The vnode context in which the
1838 * operation is to be attempted.
1840 * Returns: 0 Success
1843 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
1844 * host byte order, as will be the ACL contents, if any.
1845 * Internally, we will canonicalize these values from network (PPC)
1846 * byte order after we retrieve them so that the on-disk contents
1847 * of the extended attribute are identical for both PPC and Intel
1848 * (if we were not being required to provide this service via
1849 * fallback, this would be the job of the filesystem
1850 * 'VNOP_GETATTR' call).
1852 * We use ntohl() because it has a transitive property on Intel
1853 * machines and no effect on PPC machines.  This guarantees us
1855 * XXX: Deleting rather than ignoreing a corrupt security structure is
1856 * probably the only way to reset it without assistance from an
1857 * file system integrity checking tool. Right now we ignore it.
1859 * XXX:	We should enumerate the possible errno values here, and where
1860 * in the code they originated.
1863 vnode_get_filesec(vnode_t vp
, kauth_filesec_t
*fsecp
, vfs_context_t ctx
)
1865 kauth_filesec_t fsec
;
1868 size_t xsize
, rsize
;
1870 uint32_t host_fsec_magic
;
1871 uint32_t host_acl_entrycount
;
1877 /* find out how big the EA is */
1878 if (vn_getxattr(vp
, KAUTH_FILESEC_XATTR
, NULL
, &xsize
, XATTR_NOSECURITY
, ctx
) != 0) {
1879 /* no EA, no filesec */
1880 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1882 /* either way, we are done */
1887 * To be valid, a kauth_filesec_t must be large enough to hold a zero
1888 * ACE entrly ACL, and if it's larger than that, it must have the right
1889 * number of bytes such that it contains an atomic number of ACEs,
1890 * rather than partial entries. Otherwise, we ignore it.
1892 if (!KAUTH_FILESEC_VALID(xsize
)) {
1893 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize
);
1898 /* how many entries would fit? */
1899 fsec_size
= KAUTH_FILESEC_COUNT(xsize
);
1901 /* get buffer and uio */
1902 if (((fsec
= kauth_filesec_alloc(fsec_size
)) == NULL
) ||
1903 ((fsec_uio
= uio_create(1, 0, UIO_SYSSPACE
, UIO_READ
)) == NULL
) ||
1904 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), xsize
)) {
1905 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
1910 /* read security attribute */
1912 if ((error
= vn_getxattr(vp
,
1913 KAUTH_FILESEC_XATTR
,
1919 /* no attribute - no security data */
1920 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
1922 /* either way, we are done */
1927 * Validate security structure; the validation must take place in host
1928 * byte order. If it's corrupt, we will just ignore it.
1931 /* Validate the size before trying to convert it */
1932 if (rsize
< KAUTH_FILESEC_SIZE(0)) {
1933 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize
);
1937 /* Validate the magic number before trying to convert it */
1938 host_fsec_magic
= ntohl(KAUTH_FILESEC_MAGIC
);
1939 if (fsec
->fsec_magic
!= host_fsec_magic
) {
1940 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic
);
1944 /* Validate the entry count before trying to convert it. */
1945 host_acl_entrycount
= ntohl(fsec
->fsec_acl
.acl_entrycount
);
1946 if (host_acl_entrycount
!= KAUTH_FILESEC_NOACL
) {
1947 if (host_acl_entrycount
> KAUTH_ACL_MAX_ENTRIES
) {
1948 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount
);
1951 if (KAUTH_FILESEC_SIZE(host_acl_entrycount
) > rsize
) {
1952 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount
, rsize
);
1957 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, NULL
);
1964 kauth_filesec_free(fsec
);
1965 if (fsec_uio
!= NULL
)
1973 * Set a filesec and optional acl contents into an extended attribute.
1974 * function will attempt to store ACL, UUID, and GUID information using a
1975 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
1976 * may or may not point to the `fsec->fsec_acl`, depending on whether the
1977 * original caller supplied an acl.
1979 * Parameters: vp The vnode on which to operate.
1980 * fsec The filesec being set.
1981 * acl The acl to be associated with 'fsec'.
1982 * ctx The vnode context in which the
1983 * operation is to be attempted.
1985 * Returns: 0 Success
1988 * Notes: Both the fsec and the acl are always valid.
1990 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
1991 * as are the acl contents, if they are used. Internally, we will
1992 * canonicalize these values into network (PPC) byte order before we
1993 * attempt to write them so that the on-disk contents of the
1994 * extended attribute are identical for both PPC and Intel (if we
1995 * were not being required to provide this service via fallback,
1996 * this would be the job of the filesystem 'VNOP_SETATTR' call).
1997 * We reverse this process on the way out, so we leave with the
1998 * same byte order we started with.
2000 * XXX: We should enummerate the possible errno values here, and where
2001 * in the code they originated.
2004 vnode_set_filesec(vnode_t vp
, kauth_filesec_t fsec
, kauth_acl_t acl
, vfs_context_t ctx
)
2008 uint32_t saved_acl_copysize
;
2012 if ((fsec_uio
= uio_create(2, 0, UIO_SYSSPACE
, UIO_WRITE
)) == NULL
) {
2013 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2018 * Save the pre-converted ACL copysize, because it gets swapped too
2019 * if we are running with the wrong endianness.
2021 saved_acl_copysize
= KAUTH_ACL_COPYSIZE(acl
);
2023 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK
, fsec
, acl
);
2025 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), sizeof(struct kauth_filesec
) - sizeof(struct kauth_acl
));
2026 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(acl
), saved_acl_copysize
);
2027 error
= vn_setxattr(vp
,
2028 KAUTH_FILESEC_XATTR
,
2030 XATTR_NOSECURITY
, /* we have auth'ed already */
2032 VFS_DEBUG(ctx
, vp
, "SETATTR - set ACL returning %d", error
);
2034 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, acl
);
2037 if (fsec_uio
!= NULL
)
2044 * Returns: 0 Success
2045 * ENOMEM Not enough space [only if has filesec]
2047 * vnode_get_filesec: ???
2048 * kauth_cred_guid2uid: ???
2049 * kauth_cred_guid2gid: ???
2050 * vfs_update_vfsstat: ???
2053 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2055 kauth_filesec_t fsec
;
2061 /* don't ask for extended security data if the filesystem doesn't support it */
2062 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
2063 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
2064 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
2065 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
2069 * If the caller wants size values we might have to synthesise, give the
2070 * filesystem the opportunity to supply better intermediate results.
2072 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2073 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2074 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2075 VATTR_SET_ACTIVE(vap
, va_data_size
);
2076 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
2077 VATTR_SET_ACTIVE(vap
, va_total_size
);
2078 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
2081 error
= VNOP_GETATTR(vp
, vap
, ctx
);
2083 KAUTH_DEBUG("ERROR - returning %d", error
);
2088 * If extended security data was requested but not returned, try the fallback
2091 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2094 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
2095 /* try to get the filesec */
2096 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
2099 /* if no filesec, no attributes */
2101 VATTR_RETURN(vap
, va_acl
, NULL
);
2102 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
2103 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
2106 /* looks good, try to return what we were asked for */
2107 VATTR_RETURN(vap
, va_uuuid
, fsec
->fsec_owner
);
2108 VATTR_RETURN(vap
, va_guuid
, fsec
->fsec_group
);
2110 /* only return the ACL if we were actually asked for it */
2111 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2112 if (fsec
->fsec_acl
.acl_entrycount
== KAUTH_FILESEC_NOACL
) {
2113 VATTR_RETURN(vap
, va_acl
, NULL
);
2115 facl
= kauth_acl_alloc(fsec
->fsec_acl
.acl_entrycount
);
2117 kauth_filesec_free(fsec
);
2121 bcopy(&fsec
->fsec_acl
, facl
, KAUTH_ACL_COPYSIZE(&fsec
->fsec_acl
));
2122 VATTR_RETURN(vap
, va_acl
, facl
);
2125 kauth_filesec_free(fsec
);
2129 * If someone gave us an unsolicited filesec, toss it. We promise that
2130 * we're OK with a filesystem giving us anything back, but our callers
2131 * only expect what they asked for.
2133 if (VATTR_IS_SUPPORTED(vap
, va_acl
) && !VATTR_IS_ACTIVE(vap
, va_acl
)) {
2134 if (vap
->va_acl
!= NULL
)
2135 kauth_acl_free(vap
->va_acl
);
2136 VATTR_CLEAR_SUPPORTED(vap
, va_acl
);
2139 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2141 * Handle the case where we need a UID/GID, but only have extended
2142 * security information.
2144 if (VATTR_NOT_RETURNED(vap
, va_uid
) &&
2145 VATTR_IS_SUPPORTED(vap
, va_uuuid
) &&
2146 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
2147 if ((error
= kauth_cred_guid2uid(&vap
->va_uuuid
, &nuid
)) == 0)
2148 VATTR_RETURN(vap
, va_uid
, nuid
);
2150 if (VATTR_NOT_RETURNED(vap
, va_gid
) &&
2151 VATTR_IS_SUPPORTED(vap
, va_guuid
) &&
2152 !kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
2153 if ((error
= kauth_cred_guid2gid(&vap
->va_guuid
, &ngid
)) == 0)
2154 VATTR_RETURN(vap
, va_gid
, ngid
);
2159 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2161 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2162 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2164 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2165 nuid
= vp
->v_mount
->mnt_fsowner
;
2166 if (nuid
== KAUTH_UID_NONE
)
2168 } else if (VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2171 /* this will always be something sensible */
2172 nuid
= vp
->v_mount
->mnt_fsowner
;
2174 if ((nuid
== 99) && !vfs_context_issuser(ctx
))
2175 nuid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
2176 VATTR_RETURN(vap
, va_uid
, nuid
);
2178 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2179 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2181 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2182 ngid
= vp
->v_mount
->mnt_fsgroup
;
2183 if (ngid
== KAUTH_GID_NONE
)
2185 } else if (VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2188 /* this will always be something sensible */
2189 ngid
= vp
->v_mount
->mnt_fsgroup
;
2191 if ((ngid
== 99) && !vfs_context_issuser(ctx
))
2192 ngid
= kauth_cred_getgid(vfs_context_ucred(ctx
));
2193 VATTR_RETURN(vap
, va_gid
, ngid
);
2197 * Synthesise some values that can be reasonably guessed.
2199 if (!VATTR_IS_SUPPORTED(vap
, va_iosize
))
2200 VATTR_RETURN(vap
, va_iosize
, vp
->v_mount
->mnt_vfsstat
.f_iosize
);
2202 if (!VATTR_IS_SUPPORTED(vap
, va_flags
))
2203 VATTR_RETURN(vap
, va_flags
, 0);
2205 if (!VATTR_IS_SUPPORTED(vap
, va_filerev
))
2206 VATTR_RETURN(vap
, va_filerev
, 0);
2208 if (!VATTR_IS_SUPPORTED(vap
, va_gen
))
2209 VATTR_RETURN(vap
, va_gen
, 0);
2212 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2214 if (!VATTR_IS_SUPPORTED(vap
, va_data_size
))
2215 VATTR_RETURN(vap
, va_data_size
, 0);
2217 /* do we want any of the possibly-computed values? */
2218 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2219 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2220 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2221 /* make sure f_bsize is valid */
2222 if (vp
->v_mount
->mnt_vfsstat
.f_bsize
== 0) {
2223 if ((error
= vfs_update_vfsstat(vp
->v_mount
, ctx
, VFS_KERNEL_EVENT
)) != 0)
2227 /* default va_data_alloc from va_data_size */
2228 if (!VATTR_IS_SUPPORTED(vap
, va_data_alloc
))
2229 VATTR_RETURN(vap
, va_data_alloc
, roundup(vap
->va_data_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2231 /* default va_total_size from va_data_size */
2232 if (!VATTR_IS_SUPPORTED(vap
, va_total_size
))
2233 VATTR_RETURN(vap
, va_total_size
, vap
->va_data_size
);
2235 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2236 if (!VATTR_IS_SUPPORTED(vap
, va_total_alloc
))
2237 VATTR_RETURN(vap
, va_total_alloc
, roundup(vap
->va_total_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2241 * If we don't have a change time, pull it from the modtime.
2243 if (!VATTR_IS_SUPPORTED(vap
, va_change_time
) && VATTR_IS_SUPPORTED(vap
, va_modify_time
))
2244 VATTR_RETURN(vap
, va_change_time
, vap
->va_modify_time
);
2247 * This is really only supported for the creation VNOPs, but since the field is there
2248 * we should populate it correctly.
2250 VATTR_RETURN(vap
, va_type
, vp
->v_type
);
2253 * The fsid can be obtained from the mountpoint directly.
2255 VATTR_RETURN(vap
, va_fsid
, vp
->v_mount
->mnt_vfsstat
.f_fsid
.val
[0]);
2263 * Set the attributes on a vnode in a vnode context.
2265 * Parameters: vp The vnode whose attributes to set.
2266 * vap A pointer to the attributes to set.
2267 * ctx The vnode context in which the
2268 * operation is to be attempted.
2270 * Returns: 0 Success
2273 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2275 * The contents of the data area pointed to by 'vap' may be
2276 * modified if the vnode is on a filesystem which has been
2277 * mounted with ignore-ownership flags, or by the underlying
2278 * VFS itself, or by the fallback code, if the underlying VFS
2279 * does not support ACL, UUID, or GUUID attributes directly.
2281 * XXX: We should enummerate the possible errno values here, and where
2282 * in the code they originated.
2285 vnode_setattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2287 int error
, is_perm_change
=0;
2290 * Make sure the filesystem is mounted R/W.
2291 * If not, return an error.
2293 if (vfs_isrdonly(vp
->v_mount
)) {
2298 /* For streams, va_data_size is the only setable attribute. */
2299 if ((vp
->v_flag
& VISNAMEDSTREAM
) && (vap
->va_active
!= VNODE_ATTR_va_data_size
)) {
2306 * If ownership is being ignored on this volume, we silently discard
2307 * ownership changes.
2309 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2310 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
2311 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
2314 if ( VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
)
2315 || VATTR_IS_ACTIVE(vap
, va_mode
) || VATTR_IS_ACTIVE(vap
, va_acl
)) {
2320 * Make sure that extended security is enabled if we're going to try
2323 if (!vfs_extendedsecurity(vnode_mount(vp
)) &&
2324 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
2325 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2330 error
= VNOP_SETATTR(vp
, vap
, ctx
);
2332 if ((error
== 0) && !VATTR_ALL_SUPPORTED(vap
))
2333 error
= vnode_setattr_fallback(vp
, vap
, ctx
);
2336 // only send a stat_changed event if this is more than
2337 // just an access time update
2338 if (error
== 0 && (vap
->va_active
!= VNODE_ATTR_BIT(va_access_time
))) {
2339 if (is_perm_change
) {
2340 if (need_fsevent(FSE_CHOWN
, vp
)) {
2341 add_fsevent(FSE_CHOWN
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2343 } else if(need_fsevent(FSE_STAT_CHANGED
, vp
)) {
2344 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2354 * Fallback for setting the attributes on a vnode in a vnode context. This
2355 * Function will attempt to store ACL, UUID, and GUID information utilizing
2356 * a read/modify/write operation against an EA used as a backing store for
2359 * Parameters: vp The vnode whose attributes to set.
2360 * vap A pointer to the attributes to set.
2361 * ctx The vnode context in which the
2362 * operation is to be attempted.
2364 * Returns: 0 Success
2367 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2368 * as are the fsec and lfsec, if they are used.
2370 * The contents of the data area pointed to by 'vap' may be
2371 * modified to indicate that the attribute is supported for
2372 * any given requested attribute.
2374 * XXX: We should enummerate the possible errno values here, and where
2375 * in the code they originated.
2378 vnode_setattr_fallback(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2380 kauth_filesec_t fsec
;
2382 struct kauth_filesec lfsec
;
2388 * Extended security fallback via extended attributes.
2390 * Note that we do not free the filesec; the caller is expected to
2393 if (VATTR_NOT_RETURNED(vap
, va_acl
) ||
2394 VATTR_NOT_RETURNED(vap
, va_uuuid
) ||
2395 VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2396 VFS_DEBUG(ctx
, vp
, "SETATTR - doing filesec fallback");
2399 * Fail for file types that we don't permit extended security
2402 if ((vp
->v_type
!= VDIR
) && (vp
->v_type
!= VLNK
) && (vp
->v_type
!= VREG
)) {
2403 VFS_DEBUG(ctx
, vp
, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp
));
2409 * If we don't have all the extended security items, we need
2410 * to fetch the existing data to perform a read-modify-write
2414 if (!VATTR_IS_ACTIVE(vap
, va_acl
) ||
2415 !VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
2416 !VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2417 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0) {
2418 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error
);
2422 /* if we didn't get a filesec, use our local one */
2424 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2427 KAUTH_DEBUG("SETATTR - updating existing filesec");
2430 facl
= &fsec
->fsec_acl
;
2432 /* if we're using the local filesec, we need to initialise it */
2433 if (fsec
== &lfsec
) {
2434 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
2435 fsec
->fsec_owner
= kauth_null_guid
;
2436 fsec
->fsec_group
= kauth_null_guid
;
2437 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2438 facl
->acl_flags
= 0;
2442 * Update with the supplied attributes.
2444 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
2445 KAUTH_DEBUG("SETATTR - updating owner UUID");
2446 fsec
->fsec_owner
= vap
->va_uuuid
;
2447 VATTR_SET_SUPPORTED(vap
, va_uuuid
);
2449 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2450 KAUTH_DEBUG("SETATTR - updating group UUID");
2451 fsec
->fsec_group
= vap
->va_guuid
;
2452 VATTR_SET_SUPPORTED(vap
, va_guuid
);
2454 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2455 if (vap
->va_acl
== NULL
) {
2456 KAUTH_DEBUG("SETATTR - removing ACL");
2457 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2459 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap
->va_acl
->acl_entrycount
);
2462 VATTR_SET_SUPPORTED(vap
, va_acl
);
2466 * If the filesec data is all invalid, we can just remove
2467 * the EA completely.
2469 if ((facl
->acl_entrycount
== KAUTH_FILESEC_NOACL
) &&
2470 kauth_guid_equal(&fsec
->fsec_owner
, &kauth_null_guid
) &&
2471 kauth_guid_equal(&fsec
->fsec_group
, &kauth_null_guid
)) {
2472 error
= vn_removexattr(vp
, KAUTH_FILESEC_XATTR
, XATTR_NOSECURITY
, ctx
);
2473 /* no attribute is ok, nothing to delete */
2474 if (error
== ENOATTR
)
2476 VFS_DEBUG(ctx
, vp
, "SETATTR - remove filesec returning %d", error
);
2479 error
= vnode_set_filesec(vp
, fsec
, facl
, ctx
);
2480 VFS_DEBUG(ctx
, vp
, "SETATTR - update filesec returning %d", error
);
2483 /* if we fetched a filesec, dispose of the buffer */
2485 kauth_filesec_free(fsec
);
2493 * Definition of vnode operations.
2499 *#% lookup dvp L ? ?
2500 *#% lookup vpp - L -
2502 struct vnop_lookup_args
{
2503 struct vnodeop_desc
*a_desc
;
2506 struct componentname
*a_cnp
;
2507 vfs_context_t a_context
;
2512 * Returns: 0 Success
2513 * lock_fsnode:ENOENT No such file or directory [only for VFS
2514 * that is not thread safe & vnode is
2515 * currently being/has been terminated]
2516 * <vfs_lookup>:ENAMETOOLONG
2517 * <vfs_lookup>:ENOENT
2518 * <vfs_lookup>:EJUSTRETURN
2519 * <vfs_lookup>:EPERM
2520 * <vfs_lookup>:EISDIR
2521 * <vfs_lookup>:ENOTDIR
2524 * Note: The return codes from the underlying VFS's lookup routine can't
2525 * be fully enumerated here, since third party VFS authors may not
2526 * limit their error returns to the ones documented here, even
2527 * though this may result in some programs functioning incorrectly.
2529 * The return codes documented above are those which may currently
2530 * be returned by HFS from hfs_lookup, not including additional
2531 * error code which may be propagated from underlying routines.
2534 VNOP_LOOKUP(vnode_t dvp
, vnode_t
*vpp
, struct componentname
*cnp
, vfs_context_t ctx
)
2537 struct vnop_lookup_args a
;
2540 int funnel_state
= 0;
2542 a
.a_desc
= &vnop_lookup_desc
;
2547 thread_safe
= THREAD_SAFE_FS(dvp
);
2550 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2554 _err
= (*dvp
->v_op
[vnop_lookup_desc
.vdesc_offset
])(&a
);
2559 if ( (cnp
->cn_flags
& ISLASTCN
) ) {
2560 if ( (cnp
->cn_flags
& LOCKPARENT
) ) {
2561 if ( !(cnp
->cn_flags
& FSNODELOCKHELD
) ) {
2563 * leave the fsnode lock held on
2564 * the directory, but restore the funnel...
2565 * also indicate that we need to drop the
2566 * fsnode_lock when we're done with the
2567 * system call processing for this path
2569 cnp
->cn_flags
|= FSNODELOCKHELD
;
2571 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2576 unlock_fsnode(dvp
, &funnel_state
);
2584 *#% create dvp L L L
2585 *#% create vpp - L -
2589 struct vnop_create_args
{
2590 struct vnodeop_desc
*a_desc
;
2593 struct componentname
*a_cnp
;
2594 struct vnode_attr
*a_vap
;
2595 vfs_context_t a_context
;
2599 VNOP_CREATE(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2602 struct vnop_create_args a
;
2604 int funnel_state
= 0;
2606 a
.a_desc
= &vnop_create_desc
;
2612 thread_safe
= THREAD_SAFE_FS(dvp
);
2615 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2619 _err
= (*dvp
->v_op
[vnop_create_desc
.vdesc_offset
])(&a
);
2620 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
2622 * Remove stale Apple Double file (if any).
2624 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 0);
2627 unlock_fsnode(dvp
, &funnel_state
);
2635 *#% whiteout dvp L L L
2636 *#% whiteout cnp - - -
2637 *#% whiteout flag - - -
2640 struct vnop_whiteout_args
{
2641 struct vnodeop_desc
*a_desc
;
2643 struct componentname
*a_cnp
;
2645 vfs_context_t a_context
;
2649 VNOP_WHITEOUT(vnode_t dvp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
2652 struct vnop_whiteout_args a
;
2654 int funnel_state
= 0;
2656 a
.a_desc
= &vnop_whiteout_desc
;
2661 thread_safe
= THREAD_SAFE_FS(dvp
);
2664 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2668 _err
= (*dvp
->v_op
[vnop_whiteout_desc
.vdesc_offset
])(&a
);
2670 unlock_fsnode(dvp
, &funnel_state
);
2682 struct vnop_mknod_args
{
2683 struct vnodeop_desc
*a_desc
;
2686 struct componentname
*a_cnp
;
2687 struct vnode_attr
*a_vap
;
2688 vfs_context_t a_context
;
2692 VNOP_MKNOD(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2696 struct vnop_mknod_args a
;
2698 int funnel_state
= 0;
2700 a
.a_desc
= &vnop_mknod_desc
;
2706 thread_safe
= THREAD_SAFE_FS(dvp
);
2709 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
2713 _err
= (*dvp
->v_op
[vnop_mknod_desc
.vdesc_offset
])(&a
);
2715 unlock_fsnode(dvp
, &funnel_state
);
2726 struct vnop_open_args
{
2727 struct vnodeop_desc
*a_desc
;
2730 vfs_context_t a_context
;
2734 VNOP_OPEN(vnode_t vp
, int mode
, vfs_context_t ctx
)
2737 struct vnop_open_args a
;
2739 int funnel_state
= 0;
2742 ctx
= vfs_context_current();
2744 a
.a_desc
= &vnop_open_desc
;
2748 thread_safe
= THREAD_SAFE_FS(vp
);
2751 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2752 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2753 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2754 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2759 _err
= (*vp
->v_op
[vnop_open_desc
.vdesc_offset
])(&a
);
2761 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2762 unlock_fsnode(vp
, NULL
);
2764 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2775 struct vnop_close_args
{
2776 struct vnodeop_desc
*a_desc
;
2779 vfs_context_t a_context
;
2783 VNOP_CLOSE(vnode_t vp
, int fflag
, vfs_context_t ctx
)
2786 struct vnop_close_args a
;
2788 int funnel_state
= 0;
2791 ctx
= vfs_context_current();
2793 a
.a_desc
= &vnop_close_desc
;
2797 thread_safe
= THREAD_SAFE_FS(vp
);
2800 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
2801 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2802 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
2803 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2808 _err
= (*vp
->v_op
[vnop_close_desc
.vdesc_offset
])(&a
);
2810 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
2811 unlock_fsnode(vp
, NULL
);
2813 (void) thread_funnel_set(kernel_flock
, funnel_state
);
2824 struct vnop_access_args
{
2825 struct vnodeop_desc
*a_desc
;
2828 vfs_context_t a_context
;
2832 VNOP_ACCESS(vnode_t vp
, int action
, vfs_context_t ctx
)
2835 struct vnop_access_args a
;
2837 int funnel_state
= 0;
2840 ctx
= vfs_context_current();
2842 a
.a_desc
= &vnop_access_desc
;
2844 a
.a_action
= action
;
2846 thread_safe
= THREAD_SAFE_FS(vp
);
2849 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2853 _err
= (*vp
->v_op
[vnop_access_desc
.vdesc_offset
])(&a
);
2855 unlock_fsnode(vp
, &funnel_state
);
2863 *#% getattr vp = = =
2866 struct vnop_getattr_args
{
2867 struct vnodeop_desc
*a_desc
;
2869 struct vnode_attr
*a_vap
;
2870 vfs_context_t a_context
;
2874 VNOP_GETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2877 struct vnop_getattr_args a
;
2879 int funnel_state
= 0; /* protected by thread_safe */
2881 a
.a_desc
= &vnop_getattr_desc
;
2885 thread_safe
= THREAD_SAFE_FS(vp
);
2888 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2892 _err
= (*vp
->v_op
[vnop_getattr_desc
.vdesc_offset
])(&a
);
2894 unlock_fsnode(vp
, &funnel_state
);
2902 *#% setattr vp L L L
2905 struct vnop_setattr_args
{
2906 struct vnodeop_desc
*a_desc
;
2908 struct vnode_attr
*a_vap
;
2909 vfs_context_t a_context
;
2913 VNOP_SETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
2916 struct vnop_setattr_args a
;
2918 int funnel_state
= 0; /* protected by thread_safe */
2920 a
.a_desc
= &vnop_setattr_desc
;
2924 thread_safe
= THREAD_SAFE_FS(vp
);
2927 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
2931 _err
= (*vp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
2934 * Shadow uid/gid/mod change to extended attribute file.
2936 if (_err
== 0 && !NATIVE_XATTR(vp
)) {
2937 struct vnode_attr va
;
2941 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2942 VATTR_SET(&va
, va_uid
, vap
->va_uid
);
2945 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2946 VATTR_SET(&va
, va_gid
, vap
->va_gid
);
2949 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
2950 VATTR_SET(&va
, va_mode
, vap
->va_mode
);
2957 dvp
= vnode_getparent(vp
);
2958 vname
= vnode_getname(vp
);
2960 xattrfile_setattr(dvp
, vname
, &va
, ctx
, thread_safe
);
2964 vnode_putname(vname
);
2968 unlock_fsnode(vp
, &funnel_state
);
2971 * If we have changed any of the things about the file that are likely
2972 * to result in changes to authorization results, blow the vnode auth
2976 VATTR_IS_SUPPORTED(vap
, va_mode
) ||
2977 VATTR_IS_SUPPORTED(vap
, va_uid
) ||
2978 VATTR_IS_SUPPORTED(vap
, va_gid
) ||
2979 VATTR_IS_SUPPORTED(vap
, va_flags
) ||
2980 VATTR_IS_SUPPORTED(vap
, va_acl
) ||
2981 VATTR_IS_SUPPORTED(vap
, va_uuuid
) ||
2982 VATTR_IS_SUPPORTED(vap
, va_guuid
)))
2983 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
2995 struct vnop_read_args
{
2996 struct vnodeop_desc
*a_desc
;
3000 vfs_context_t a_context
;
3004 VNOP_READ(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3007 struct vnop_read_args a
;
3009 int funnel_state
= 0;
3012 ctx
= vfs_context_current();
3015 a
.a_desc
= &vnop_read_desc
;
3018 a
.a_ioflag
= ioflag
;
3020 thread_safe
= THREAD_SAFE_FS(vp
);
3023 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3024 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3025 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3026 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3031 _err
= (*vp
->v_op
[vnop_read_desc
.vdesc_offset
])(&a
);
3034 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3035 unlock_fsnode(vp
, NULL
);
3037 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3049 struct vnop_write_args
{
3050 struct vnodeop_desc
*a_desc
;
3054 vfs_context_t a_context
;
3058 VNOP_WRITE(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3060 struct vnop_write_args a
;
3063 int funnel_state
= 0;
3066 ctx
= vfs_context_current();
3069 a
.a_desc
= &vnop_write_desc
;
3072 a
.a_ioflag
= ioflag
;
3074 thread_safe
= THREAD_SAFE_FS(vp
);
3077 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3078 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3079 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3080 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3085 _err
= (*vp
->v_op
[vnop_write_desc
.vdesc_offset
])(&a
);
3088 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3089 unlock_fsnode(vp
, NULL
);
3091 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3103 struct vnop_ioctl_args
{
3104 struct vnodeop_desc
*a_desc
;
3109 vfs_context_t a_context
;
3113 VNOP_IOCTL(vnode_t vp
, u_long command
, caddr_t data
, int fflag
, vfs_context_t ctx
)
3116 struct vnop_ioctl_args a
;
3118 int funnel_state
= 0;
3121 ctx
= vfs_context_current();
3124 if (vfs_context_is64bit(ctx
)) {
3125 if (!vnode_vfs64bitready(vp
)) {
3130 a
.a_desc
= &vnop_ioctl_desc
;
3132 a
.a_command
= command
;
3136 thread_safe
= THREAD_SAFE_FS(vp
);
3139 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3140 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3141 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3142 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3147 _err
= (*vp
->v_op
[vnop_ioctl_desc
.vdesc_offset
])(&a
);
3149 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3150 unlock_fsnode(vp
, NULL
);
3152 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3164 struct vnop_select_args
{
3165 struct vnodeop_desc
*a_desc
;
3170 vfs_context_t a_context
;
3174 VNOP_SELECT(vnode_t vp
, int which
, int fflags
, void * wql
, vfs_context_t ctx
)
3177 struct vnop_select_args a
;
3179 int funnel_state
= 0;
3182 ctx
= vfs_context_current();
3184 a
.a_desc
= &vnop_select_desc
;
3187 a
.a_fflags
= fflags
;
3190 thread_safe
= THREAD_SAFE_FS(vp
);
3193 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3194 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3195 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3196 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3201 _err
= (*vp
->v_op
[vnop_select_desc
.vdesc_offset
])(&a
);
3203 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3204 unlock_fsnode(vp
, NULL
);
3206 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3215 *#% exchange fvp L L L
3216 *#% exchange tvp L L L
3219 struct vnop_exchange_args
{
3220 struct vnodeop_desc
*a_desc
;
3224 vfs_context_t a_context
;
3228 VNOP_EXCHANGE(vnode_t fvp
, vnode_t tvp
, int options
, vfs_context_t ctx
)
3231 struct vnop_exchange_args a
;
3233 int funnel_state
= 0;
3234 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3236 a
.a_desc
= &vnop_exchange_desc
;
3239 a
.a_options
= options
;
3241 thread_safe
= THREAD_SAFE_FS(fvp
);
3245 * Lock in vnode address order to avoid deadlocks
3254 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) ) {
3257 if ( (_err
= lock_fsnode(lock_second
, NULL
)) ) {
3258 unlock_fsnode(lock_first
, &funnel_state
);
3262 _err
= (*fvp
->v_op
[vnop_exchange_desc
.vdesc_offset
])(&a
);
3264 unlock_fsnode(lock_second
, NULL
);
3265 unlock_fsnode(lock_first
, &funnel_state
);
3277 struct vnop_revoke_args
{
3278 struct vnodeop_desc
*a_desc
;
3281 vfs_context_t a_context
;
3285 VNOP_REVOKE(vnode_t vp
, int flags
, vfs_context_t ctx
)
3287 struct vnop_revoke_args a
;
3290 int funnel_state
= 0;
3292 a
.a_desc
= &vnop_revoke_desc
;
3296 thread_safe
= THREAD_SAFE_FS(vp
);
3299 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3301 _err
= (*vp
->v_op
[vnop_revoke_desc
.vdesc_offset
])(&a
);
3303 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3315 struct vnop_mmap_args
{
3316 struct vnodeop_desc
*a_desc
;
3319 vfs_context_t a_context
;
3323 VNOP_MMAP(vnode_t vp
, int fflags
, vfs_context_t ctx
)
3326 struct vnop_mmap_args a
;
3328 int funnel_state
= 0;
3330 a
.a_desc
= &vnop_mmap_desc
;
3332 a
.a_fflags
= fflags
;
3334 thread_safe
= THREAD_SAFE_FS(vp
);
3337 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3341 _err
= (*vp
->v_op
[vnop_mmap_desc
.vdesc_offset
])(&a
);
3343 unlock_fsnode(vp
, &funnel_state
);
3352 *# mnomap - vp U U U
3355 struct vnop_mnomap_args
{
3356 struct vnodeop_desc
*a_desc
;
3358 vfs_context_t a_context
;
3362 VNOP_MNOMAP(vnode_t vp
, vfs_context_t ctx
)
3365 struct vnop_mnomap_args a
;
3367 int funnel_state
= 0;
3369 a
.a_desc
= &vnop_mnomap_desc
;
3372 thread_safe
= THREAD_SAFE_FS(vp
);
3375 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3379 _err
= (*vp
->v_op
[vnop_mnomap_desc
.vdesc_offset
])(&a
);
3381 unlock_fsnode(vp
, &funnel_state
);
3393 struct vnop_fsync_args
{
3394 struct vnodeop_desc
*a_desc
;
3397 vfs_context_t a_context
;
3401 VNOP_FSYNC(vnode_t vp
, int waitfor
, vfs_context_t ctx
)
3403 struct vnop_fsync_args a
;
3406 int funnel_state
= 0;
3408 a
.a_desc
= &vnop_fsync_desc
;
3410 a
.a_waitfor
= waitfor
;
3412 thread_safe
= THREAD_SAFE_FS(vp
);
3415 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3419 _err
= (*vp
->v_op
[vnop_fsync_desc
.vdesc_offset
])(&a
);
3421 unlock_fsnode(vp
, &funnel_state
);
3430 *#% remove dvp L U U
3434 struct vnop_remove_args
{
3435 struct vnodeop_desc
*a_desc
;
3438 struct componentname
*a_cnp
;
3440 vfs_context_t a_context
;
3444 VNOP_REMOVE(vnode_t dvp
, vnode_t vp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
3447 struct vnop_remove_args a
;
3449 int funnel_state
= 0;
3451 a
.a_desc
= &vnop_remove_desc
;
3457 thread_safe
= THREAD_SAFE_FS(dvp
);
3460 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3464 _err
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
3467 vnode_setneedinactive(vp
);
3469 if ( !(NATIVE_XATTR(dvp
)) ) {
3471 * Remove any associated extended attribute file (._ AppleDouble file).
3473 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 1);
3477 unlock_fsnode(vp
, &funnel_state
);
3490 struct vnop_link_args
{
3491 struct vnodeop_desc
*a_desc
;
3494 struct componentname
*a_cnp
;
3495 vfs_context_t a_context
;
3499 VNOP_LINK(vnode_t vp
, vnode_t tdvp
, struct componentname
* cnp
, vfs_context_t ctx
)
3502 struct vnop_link_args a
;
3504 int funnel_state
= 0;
3507 * For file systems with non-native extended attributes,
3508 * disallow linking to an existing "._" Apple Double file.
3510 if ( !NATIVE_XATTR(tdvp
) && (vp
->v_type
== VREG
)) {
3513 vname
= vnode_getname(vp
);
3514 if (vname
!= NULL
) {
3516 if (vname
[0] == '.' && vname
[1] == '_' && vname
[2] != '\0') {
3519 vnode_putname(vname
);
3524 a
.a_desc
= &vnop_link_desc
;
3529 thread_safe
= THREAD_SAFE_FS(vp
);
3532 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3536 _err
= (*tdvp
->v_op
[vnop_link_desc
.vdesc_offset
])(&a
);
3538 unlock_fsnode(vp
, &funnel_state
);
3547 *#% rename fdvp U U U
3548 *#% rename fvp U U U
3549 *#% rename tdvp L U U
3550 *#% rename tvp X U U
3553 struct vnop_rename_args
{
3554 struct vnodeop_desc
*a_desc
;
3557 struct componentname
*a_fcnp
;
3560 struct componentname
*a_tcnp
;
3561 vfs_context_t a_context
;
3565 VNOP_RENAME(struct vnode
*fdvp
, struct vnode
*fvp
, struct componentname
*fcnp
,
3566 struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
3570 struct vnop_rename_args a
;
3571 int funnel_state
= 0;
3572 char smallname1
[48];
3573 char smallname2
[48];
3574 char *xfromname
= NULL
;
3575 char *xtoname
= NULL
;
3576 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3577 vnode_t fdvp_unsafe
= NULLVP
;
3578 vnode_t tdvp_unsafe
= NULLVP
;
3580 a
.a_desc
= &vnop_rename_desc
;
3589 if (!THREAD_SAFE_FS(fdvp
))
3591 if (!THREAD_SAFE_FS(tdvp
))
3594 if (fdvp_unsafe
!= NULLVP
) {
3596 * Lock parents in vnode address order to avoid deadlocks
3597 * note that it's possible for the fdvp to be unsafe,
3598 * but the tdvp to be safe because tvp could be a directory
3599 * in the root of a filesystem... in that case, tdvp is the
3600 * in the filesystem that this root is mounted on
3602 if (tdvp_unsafe
== NULL
|| fdvp_unsafe
== tdvp_unsafe
) {
3603 lock_first
= fdvp_unsafe
;
3605 } else if (fdvp_unsafe
< tdvp_unsafe
) {
3606 lock_first
= fdvp_unsafe
;
3607 lock_second
= tdvp_unsafe
;
3609 lock_first
= tdvp_unsafe
;
3610 lock_second
= fdvp_unsafe
;
3612 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) )
3615 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3616 unlock_fsnode(lock_first
, &funnel_state
);
3621 * Lock both children in vnode address order to avoid deadlocks
3623 if (tvp
== NULL
|| tvp
== fvp
) {
3626 } else if (fvp
< tvp
) {
3633 if ( (_err
= lock_fsnode(lock_first
, NULL
)) )
3636 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
3637 unlock_fsnode(lock_first
, NULL
);
3642 * Save source and destination names (._ AppleDouble files).
3643 * Skip if source already has a "._" prefix.
3645 if (!NATIVE_XATTR(fdvp
) &&
3646 !(fcnp
->cn_nameptr
[0] == '.' && fcnp
->cn_nameptr
[1] == '_')) {
3649 /* Get source attribute file name. */
3650 len
= fcnp
->cn_namelen
+ 3;
3651 if (len
> sizeof(smallname1
)) {
3652 MALLOC(xfromname
, char *, len
, M_TEMP
, M_WAITOK
);
3654 xfromname
= &smallname1
[0];
3656 strlcpy(xfromname
, "._", min(sizeof smallname1
, len
));
3657 strncat(xfromname
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
);
3658 xfromname
[len
-1] = '\0';
3660 /* Get destination attribute file name. */
3661 len
= tcnp
->cn_namelen
+ 3;
3662 if (len
> sizeof(smallname2
)) {
3663 MALLOC(xtoname
, char *, len
, M_TEMP
, M_WAITOK
);
3665 xtoname
= &smallname2
[0];
3667 strlcpy(xtoname
, "._", min(sizeof smallname2
, len
));
3668 strncat(xtoname
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
);
3669 xtoname
[len
-1] = '\0';
3672 _err
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3674 if (fdvp_unsafe
!= NULLVP
) {
3675 if (lock_second
!= NULL
)
3676 unlock_fsnode(lock_second
, NULL
);
3677 unlock_fsnode(lock_first
, NULL
);
3680 if (tvp
&& tvp
!= fvp
)
3681 vnode_setneedinactive(tvp
);
3685 * Rename any associated extended attribute file (._ AppleDouble file).
3687 if (_err
== 0 && !NATIVE_XATTR(fdvp
) && xfromname
!= NULL
) {
3688 struct nameidata fromnd
, tond
;
3693 * Get source attribute file vnode.
3694 * Note that fdvp already has an iocount reference and
3695 * using DELETE will take an additional reference.
3697 NDINIT(&fromnd
, DELETE
, NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
3698 CAST_USER_ADDR_T(xfromname
), ctx
);
3699 fromnd
.ni_dvp
= fdvp
;
3700 error
= namei(&fromnd
);
3703 /* When source doesn't exist there still may be a destination. */
3704 if (error
== ENOENT
) {
3709 } else if (fromnd
.ni_vp
->v_type
!= VREG
) {
3710 vnode_put(fromnd
.ni_vp
);
3715 struct vnop_remove_args args
;
3718 * Get destination attribute file vnode.
3719 * Note that tdvp already has an iocount reference.
3721 NDINIT(&tond
, DELETE
, NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
3722 CAST_USER_ADDR_T(xtoname
), ctx
);
3724 error
= namei(&tond
);
3728 if (tond
.ni_vp
->v_type
!= VREG
) {
3729 vnode_put(tond
.ni_vp
);
3733 args
.a_desc
= &vnop_remove_desc
;
3735 args
.a_vp
= tond
.ni_vp
;
3736 args
.a_cnp
= &tond
.ni_cnd
;
3737 args
.a_context
= ctx
;
3739 if (fdvp_unsafe
!= NULLVP
)
3740 error
= lock_fsnode(tond
.ni_vp
, NULL
);
3742 error
= (*tdvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&args
);
3744 if (fdvp_unsafe
!= NULLVP
)
3745 unlock_fsnode(tond
.ni_vp
, NULL
);
3748 vnode_setneedinactive(tond
.ni_vp
);
3750 vnode_put(tond
.ni_vp
);
3756 * Get destination attribute file vnode.
3758 NDINIT(&tond
, RENAME
,
3759 NOCACHE
| NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
3760 CAST_USER_ADDR_T(xtoname
), ctx
);
3762 error
= namei(&tond
);
3765 vnode_put(fromnd
.ni_vp
);
3769 a
.a_desc
= &vnop_rename_desc
;
3771 a
.a_fvp
= fromnd
.ni_vp
;
3772 a
.a_fcnp
= &fromnd
.ni_cnd
;
3774 a
.a_tvp
= tond
.ni_vp
;
3775 a
.a_tcnp
= &tond
.ni_cnd
;
3778 if (fdvp_unsafe
!= NULLVP
) {
3780 * Lock in vnode address order to avoid deadlocks
3782 if (tond
.ni_vp
== NULL
|| tond
.ni_vp
== fromnd
.ni_vp
) {
3783 lock_first
= fromnd
.ni_vp
;
3785 } else if (fromnd
.ni_vp
< tond
.ni_vp
) {
3786 lock_first
= fromnd
.ni_vp
;
3787 lock_second
= tond
.ni_vp
;
3789 lock_first
= tond
.ni_vp
;
3790 lock_second
= fromnd
.ni_vp
;
3792 if ( (error
= lock_fsnode(lock_first
, NULL
)) == 0) {
3793 if (lock_second
!= NULL
&& (error
= lock_fsnode(lock_second
, NULL
)) )
3794 unlock_fsnode(lock_first
, NULL
);
3801 /* Save these off so we can later verify them (fix up below) */
3802 oname
= fromnd
.ni_vp
->v_name
;
3803 oparent
= fromnd
.ni_vp
->v_parent
;
3805 error
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
3807 if (fdvp_unsafe
!= NULLVP
) {
3808 if (lock_second
!= NULL
)
3809 unlock_fsnode(lock_second
, NULL
);
3810 unlock_fsnode(lock_first
, NULL
);
3813 vnode_setneedinactive(fromnd
.ni_vp
);
3815 if (tond
.ni_vp
&& tond
.ni_vp
!= fromnd
.ni_vp
)
3816 vnode_setneedinactive(tond
.ni_vp
);
3818 * Fix up name & parent pointers on ._ file
3820 if (oname
== fromnd
.ni_vp
->v_name
&&
3821 oparent
== fromnd
.ni_vp
->v_parent
) {
3824 update_flags
= VNODE_UPDATE_NAME
;
3827 update_flags
|= VNODE_UPDATE_PARENT
;
3829 vnode_update_identity(fromnd
.ni_vp
, tdvp
,
3830 tond
.ni_cnd
.cn_nameptr
,
3831 tond
.ni_cnd
.cn_namelen
,
3832 tond
.ni_cnd
.cn_hash
,
3837 vnode_put(fromnd
.ni_vp
);
3839 vnode_put(tond
.ni_vp
);
3845 if (xfromname
&& xfromname
!= &smallname1
[0]) {
3846 FREE(xfromname
, M_TEMP
);
3848 if (xtoname
&& xtoname
!= &smallname2
[0]) {
3849 FREE(xtoname
, M_TEMP
);
3852 if (fdvp_unsafe
!= NULLVP
) {
3853 if (tdvp_unsafe
!= NULLVP
)
3854 unlock_fsnode(tdvp_unsafe
, NULL
);
3855 unlock_fsnode(fdvp_unsafe
, &funnel_state
);
3867 struct vnop_mkdir_args
{
3868 struct vnodeop_desc
*a_desc
;
3871 struct componentname
*a_cnp
;
3872 struct vnode_attr
*a_vap
;
3873 vfs_context_t a_context
;
3877 VNOP_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
3878 struct vnode_attr
*vap
, vfs_context_t ctx
)
3881 struct vnop_mkdir_args a
;
3883 int funnel_state
= 0;
3885 a
.a_desc
= &vnop_mkdir_desc
;
3891 thread_safe
= THREAD_SAFE_FS(dvp
);
3894 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3898 _err
= (*dvp
->v_op
[vnop_mkdir_desc
.vdesc_offset
])(&a
);
3899 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3901 * Remove stale Apple Double file (if any).
3903 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 0);
3906 unlock_fsnode(dvp
, &funnel_state
);
3919 struct vnop_rmdir_args
{
3920 struct vnodeop_desc
*a_desc
;
3923 struct componentname
*a_cnp
;
3924 vfs_context_t a_context
;
3929 VNOP_RMDIR(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
, vfs_context_t ctx
)
3932 struct vnop_rmdir_args a
;
3934 int funnel_state
= 0;
3936 a
.a_desc
= &vnop_rmdir_desc
;
3941 thread_safe
= THREAD_SAFE_FS(dvp
);
3944 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3948 _err
= (*vp
->v_op
[vnop_rmdir_desc
.vdesc_offset
])(&a
);
3951 vnode_setneedinactive(vp
);
3953 if ( !(NATIVE_XATTR(dvp
)) ) {
3955 * Remove any associated extended attribute file (._ AppleDouble file).
3957 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 1);
3961 unlock_fsnode(vp
, &funnel_state
);
3967 * Remove a ._ AppleDouble file
3969 #define AD_STALE_SECS (180)
3971 xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t ctx
, int thread_safe
, int force
) {
3973 struct nameidata nd
;
3975 char *filename
= NULL
;
3978 if ((basename
== NULL
) || (basename
[0] == '\0') ||
3979 (basename
[0] == '.' && basename
[1] == '_')) {
3982 filename
= &smallname
[0];
3983 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
3984 if (len
>= sizeof(smallname
)) {
3985 len
++; /* snprintf result doesn't include '\0' */
3986 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
3987 len
= snprintf(filename
, len
, "._%s", basename
);
3989 NDINIT(&nd
, DELETE
, WANTPARENT
| LOCKLEAF
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
3990 CAST_USER_ADDR_T(filename
), ctx
);
3992 if (namei(&nd
) != 0)
3997 if (xvp
->v_type
!= VREG
)
4001 * When creating a new object and a "._" file already
4002 * exists, check to see if its a stale "._" file.
4006 struct vnode_attr va
;
4009 VATTR_WANTED(&va
, va_data_size
);
4010 VATTR_WANTED(&va
, va_modify_time
);
4011 if (VNOP_GETATTR(xvp
, &va
, ctx
) == 0 &&
4012 VATTR_IS_SUPPORTED(&va
, va_data_size
) &&
4013 VATTR_IS_SUPPORTED(&va
, va_modify_time
) &&
4014 va
.va_data_size
!= 0) {
4018 if ((tv
.tv_sec
> va
.va_modify_time
.tv_sec
) &&
4019 (tv
.tv_sec
- va
.va_modify_time
.tv_sec
) > AD_STALE_SECS
) {
4020 force
= 1; /* must be stale */
4025 struct vnop_remove_args a
;
4028 a
.a_desc
= &vnop_remove_desc
;
4029 a
.a_dvp
= nd
.ni_dvp
;
4031 a
.a_cnp
= &nd
.ni_cnd
;
4035 if ( (lock_fsnode(xvp
, NULL
)) )
4038 error
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
4041 unlock_fsnode(xvp
, NULL
);
4044 vnode_setneedinactive(xvp
);
4050 if (filename
&& filename
!= &smallname
[0]) {
4051 FREE(filename
, M_TEMP
);
4056 * Shadow uid/gid/mod to a ._ AppleDouble file
4059 xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
4060 vfs_context_t ctx
, int thread_safe
) {
4062 struct nameidata nd
;
4064 char *filename
= NULL
;
4067 if ((dvp
== NULLVP
) ||
4068 (basename
== NULL
) || (basename
[0] == '\0') ||
4069 (basename
[0] == '.' && basename
[1] == '_')) {
4072 filename
= &smallname
[0];
4073 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
4074 if (len
>= sizeof(smallname
)) {
4075 len
++; /* snprintf result doesn't include '\0' */
4076 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
4077 len
= snprintf(filename
, len
, "._%s", basename
);
4079 NDINIT(&nd
, LOOKUP
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
4080 CAST_USER_ADDR_T(filename
), ctx
);
4082 if (namei(&nd
) != 0)
4088 if (xvp
->v_type
== VREG
) {
4089 struct vnop_setattr_args a
;
4091 a
.a_desc
= &vnop_setattr_desc
;
4097 if ( (lock_fsnode(xvp
, NULL
)) )
4100 (void) (*xvp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
4102 unlock_fsnode(xvp
, NULL
);
4108 if (filename
&& filename
!= &smallname
[0]) {
4109 FREE(filename
, M_TEMP
);
4116 *#% symlink dvp L U U
4117 *#% symlink vpp - U -
4120 struct vnop_symlink_args
{
4121 struct vnodeop_desc
*a_desc
;
4124 struct componentname
*a_cnp
;
4125 struct vnode_attr
*a_vap
;
4127 vfs_context_t a_context
;
4132 VNOP_SYMLINK(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
4133 struct vnode_attr
*vap
, char *target
, vfs_context_t ctx
)
4136 struct vnop_symlink_args a
;
4138 int funnel_state
= 0;
4140 a
.a_desc
= &vnop_symlink_desc
;
4145 a
.a_target
= target
;
4147 thread_safe
= THREAD_SAFE_FS(dvp
);
4150 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
4154 _err
= (*dvp
->v_op
[vnop_symlink_desc
.vdesc_offset
])(&a
);
4155 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
4157 * Remove stale Apple Double file (if any).
4159 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, thread_safe
, 0);
4162 unlock_fsnode(dvp
, &funnel_state
);
4170 *#% readdir vp L L L
4173 struct vnop_readdir_args
{
4174 struct vnodeop_desc
*a_desc
;
4180 vfs_context_t a_context
;
4185 VNOP_READDIR(struct vnode
*vp
, struct uio
*uio
, int flags
, int *eofflag
,
4186 int *numdirent
, vfs_context_t ctx
)
4189 struct vnop_readdir_args a
;
4191 int funnel_state
= 0;
4193 a
.a_desc
= &vnop_readdir_desc
;
4197 a
.a_eofflag
= eofflag
;
4198 a
.a_numdirent
= numdirent
;
4200 thread_safe
= THREAD_SAFE_FS(vp
);
4203 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4207 _err
= (*vp
->v_op
[vnop_readdir_desc
.vdesc_offset
])(&a
);
4209 unlock_fsnode(vp
, &funnel_state
);
4217 *#% readdirattr vp L L L
4220 struct vnop_readdirattr_args
{
4221 struct vnodeop_desc
*a_desc
;
4223 struct attrlist
*a_alist
;
4229 u_long
*a_actualcount
;
4230 vfs_context_t a_context
;
4235 VNOP_READDIRATTR(struct vnode
*vp
, struct attrlist
*alist
, struct uio
*uio
, u_long maxcount
,
4236 u_long options
, u_long
*newstate
, int *eofflag
, u_long
*actualcount
, vfs_context_t ctx
)
4239 struct vnop_readdirattr_args a
;
4241 int funnel_state
= 0;
4243 a
.a_desc
= &vnop_readdirattr_desc
;
4247 a
.a_maxcount
= maxcount
;
4248 a
.a_options
= options
;
4249 a
.a_newstate
= newstate
;
4250 a
.a_eofflag
= eofflag
;
4251 a
.a_actualcount
= actualcount
;
4253 thread_safe
= THREAD_SAFE_FS(vp
);
4256 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4260 _err
= (*vp
->v_op
[vnop_readdirattr_desc
.vdesc_offset
])(&a
);
4262 unlock_fsnode(vp
, &funnel_state
);
4270 *#% readlink vp L L L
4273 struct vnop_readlink_args
{
4274 struct vnodeop_desc
*a_desc
;
4277 vfs_context_t a_context
;
4282 * Returns: 0 Success
4283 * lock_fsnode:ENOENT No such file or directory [only for VFS
4284 * that is not thread safe & vnode is
4285 * currently being/has been terminated]
4286 * <vfs_readlink>:EINVAL
4287 * <vfs_readlink>:???
4289 * Note: The return codes from the underlying VFS's readlink routine
4290 * can't be fully enumerated here, since third party VFS authors
4291 * may not limit their error returns to the ones documented here,
4292 * even though this may result in some programs functioning
4295 * The return codes documented above are those which may currently
4296 * be returned by HFS from hfs_vnop_readlink, not including
4297 * additional error code which may be propagated from underlying
4301 VNOP_READLINK(struct vnode
*vp
, struct uio
*uio
, vfs_context_t ctx
)
4304 struct vnop_readlink_args a
;
4306 int funnel_state
= 0;
4308 a
.a_desc
= &vnop_readlink_desc
;
4312 thread_safe
= THREAD_SAFE_FS(vp
);
4315 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4319 _err
= (*vp
->v_op
[vnop_readlink_desc
.vdesc_offset
])(&a
);
4321 unlock_fsnode(vp
, &funnel_state
);
4329 *#% inactive vp L U U
4332 struct vnop_inactive_args
{
4333 struct vnodeop_desc
*a_desc
;
4335 vfs_context_t a_context
;
4339 VNOP_INACTIVE(struct vnode
*vp
, vfs_context_t ctx
)
4342 struct vnop_inactive_args a
;
4344 int funnel_state
= 0;
4346 a
.a_desc
= &vnop_inactive_desc
;
4349 thread_safe
= THREAD_SAFE_FS(vp
);
4352 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4356 _err
= (*vp
->v_op
[vnop_inactive_desc
.vdesc_offset
])(&a
);
4358 unlock_fsnode(vp
, &funnel_state
);
4362 /* For file systems that do not support namedstreams natively, mark
4363 * the shadow stream file vnode to be recycled as soon as the last
4364 * reference goes away. To avoid re-entering reclaim code, do not
4365 * call recycle on terminating named stream vnodes.
4367 if (vnode_isnamedstream(vp
) &&
4368 (vp
->v_parent
!= NULLVP
) &&
4369 ((vp
->v_parent
->v_mount
->mnt_kern_flag
& MNTK_NAMED_STREAMS
) == 0) &&
4370 ((vp
->v_lflag
& VL_TERMINATE
) == 0)) {
4382 *#% reclaim vp U U U
4385 struct vnop_reclaim_args
{
4386 struct vnodeop_desc
*a_desc
;
4388 vfs_context_t a_context
;
4392 VNOP_RECLAIM(struct vnode
*vp
, vfs_context_t ctx
)
4395 struct vnop_reclaim_args a
;
4397 int funnel_state
= 0;
4399 a
.a_desc
= &vnop_reclaim_desc
;
4402 thread_safe
= THREAD_SAFE_FS(vp
);
4405 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4407 _err
= (*vp
->v_op
[vnop_reclaim_desc
.vdesc_offset
])(&a
);
4409 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4416 * Returns: 0 Success
4417 * lock_fsnode:ENOENT No such file or directory [only for VFS
4418 * that is not thread safe & vnode is
4419 * currently being/has been terminated]
4420 * <vnop_pathconf_desc>:??? [per FS implementation specific]
4425 *#% pathconf vp L L L
4428 struct vnop_pathconf_args
{
4429 struct vnodeop_desc
*a_desc
;
4432 register_t
*a_retval
;
4433 vfs_context_t a_context
;
4437 VNOP_PATHCONF(struct vnode
*vp
, int name
, register_t
*retval
, vfs_context_t ctx
)
4440 struct vnop_pathconf_args a
;
4442 int funnel_state
= 0;
4444 a
.a_desc
= &vnop_pathconf_desc
;
4447 a
.a_retval
= retval
;
4449 thread_safe
= THREAD_SAFE_FS(vp
);
4452 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4456 _err
= (*vp
->v_op
[vnop_pathconf_desc
.vdesc_offset
])(&a
);
4458 unlock_fsnode(vp
, &funnel_state
);
4464 * Returns: 0 Success
4465 * err_advlock:ENOTSUP
4467 * <vnop_advlock_desc>:???
4469 * Notes: VFS implementations of advisory locking using calls through
4470 * <vnop_advlock_desc> because lock enforcement does not occur
4471 * locally should try to limit themselves to the return codes
4472 * documented above for lf_advlock and err_advlock.
4477 *#% advlock vp U U U
4480 struct vnop_advlock_args
{
4481 struct vnodeop_desc
*a_desc
;
4487 vfs_context_t a_context
;
4491 VNOP_ADVLOCK(struct vnode
*vp
, caddr_t id
, int op
, struct flock
*fl
, int flags
, vfs_context_t ctx
)
4494 struct vnop_advlock_args a
;
4496 int funnel_state
= 0;
4497 struct uthread
* uth
;
4499 a
.a_desc
= &vnop_advlock_desc
;
4506 thread_safe
= THREAD_SAFE_FS(vp
);
4508 uth
= get_bsdthread_info(current_thread());
4510 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4512 /* Disallow advisory locking on non-seekable vnodes */
4513 if (vnode_isfifo(vp
)) {
4514 _err
= err_advlock(&a
);
4516 if ((vp
->v_flag
& VLOCKLOCAL
)) {
4517 /* Advisory locking done at this layer */
4518 _err
= lf_advlock(&a
);
4520 /* Advisory locking done by underlying filesystem */
4521 _err
= (*vp
->v_op
[vnop_advlock_desc
.vdesc_offset
])(&a
);
4525 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4535 *#% allocate vp L L L
4538 struct vnop_allocate_args
{
4539 struct vnodeop_desc
*a_desc
;
4543 off_t
*a_bytesallocated
;
4545 vfs_context_t a_context
;
4550 VNOP_ALLOCATE(struct vnode
*vp
, off_t length
, u_int32_t flags
, off_t
*bytesallocated
, off_t offset
, vfs_context_t ctx
)
4553 struct vnop_allocate_args a
;
4555 int funnel_state
= 0;
4557 a
.a_desc
= &vnop_allocate_desc
;
4559 a
.a_length
= length
;
4561 a
.a_bytesallocated
= bytesallocated
;
4562 a
.a_offset
= offset
;
4564 thread_safe
= THREAD_SAFE_FS(vp
);
4567 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4571 _err
= (*vp
->v_op
[vnop_allocate_desc
.vdesc_offset
])(&a
);
4573 unlock_fsnode(vp
, &funnel_state
);
4584 struct vnop_pagein_args
{
4585 struct vnodeop_desc
*a_desc
;
4588 vm_offset_t a_pl_offset
;
4592 vfs_context_t a_context
;
4596 VNOP_PAGEIN(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
4599 struct vnop_pagein_args a
;
4601 int funnel_state
= 0;
4603 a
.a_desc
= &vnop_pagein_desc
;
4606 a
.a_pl_offset
= pl_offset
;
4607 a
.a_f_offset
= f_offset
;
4611 thread_safe
= THREAD_SAFE_FS(vp
);
4614 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4616 _err
= (*vp
->v_op
[vnop_pagein_desc
.vdesc_offset
])(&a
);
4618 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4626 *#% pageout vp = = =
4629 struct vnop_pageout_args
{
4630 struct vnodeop_desc
*a_desc
;
4633 vm_offset_t a_pl_offset
;
4637 vfs_context_t a_context
;
4642 VNOP_PAGEOUT(struct vnode
*vp
, upl_t pl
, vm_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
4645 struct vnop_pageout_args a
;
4647 int funnel_state
= 0;
4649 a
.a_desc
= &vnop_pageout_desc
;
4652 a
.a_pl_offset
= pl_offset
;
4653 a
.a_f_offset
= f_offset
;
4657 thread_safe
= THREAD_SAFE_FS(vp
);
4660 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4662 _err
= (*vp
->v_op
[vnop_pageout_desc
.vdesc_offset
])(&a
);
4664 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4673 *#% searchfs vp L L L
4676 struct vnop_searchfs_args
{
4677 struct vnodeop_desc
*a_desc
;
4679 void *a_searchparams1
;
4680 void *a_searchparams2
;
4681 struct attrlist
*a_searchattrs
;
4682 u_long a_maxmatches
;
4683 struct timeval
*a_timelimit
;
4684 struct attrlist
*a_returnattrs
;
4685 u_long
*a_nummatches
;
4686 u_long a_scriptcode
;
4689 struct searchstate
*a_searchstate
;
4690 vfs_context_t a_context
;
4695 VNOP_SEARCHFS(struct vnode
*vp
, void *searchparams1
, void *searchparams2
, struct attrlist
*searchattrs
, u_long maxmatches
, struct timeval
*timelimit
, struct attrlist
*returnattrs
, u_long
*nummatches
, u_long scriptcode
, u_long options
, struct uio
*uio
, struct searchstate
*searchstate
, vfs_context_t ctx
)
4698 struct vnop_searchfs_args a
;
4700 int funnel_state
= 0;
4702 a
.a_desc
= &vnop_searchfs_desc
;
4704 a
.a_searchparams1
= searchparams1
;
4705 a
.a_searchparams2
= searchparams2
;
4706 a
.a_searchattrs
= searchattrs
;
4707 a
.a_maxmatches
= maxmatches
;
4708 a
.a_timelimit
= timelimit
;
4709 a
.a_returnattrs
= returnattrs
;
4710 a
.a_nummatches
= nummatches
;
4711 a
.a_scriptcode
= scriptcode
;
4712 a
.a_options
= options
;
4714 a
.a_searchstate
= searchstate
;
4716 thread_safe
= THREAD_SAFE_FS(vp
);
4719 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4723 _err
= (*vp
->v_op
[vnop_searchfs_desc
.vdesc_offset
])(&a
);
4725 unlock_fsnode(vp
, &funnel_state
);
4733 *#% copyfile fvp U U U
4734 *#% copyfile tdvp L U U
4735 *#% copyfile tvp X U U
/*
 * Argument block for the copyfile vnode operation.
 * NOTE(review): garbled fragment -- several members (original lines
 * 4740-4742, 4744-4745) and the closing `};` are missing from this
 * chunk; only a_desc, a_tcnp and a_context are visible.
 */
4738 struct vnop_copyfile_args
{
4739 struct vnodeop_desc
*a_desc
;
4743 struct componentname
*a_tcnp
;
4746 vfs_context_t a_context
;
/*
 * VNOP_COPYFILE -- dispatch a copyfile request through the SOURCE
 * vnode's (fvp) operation vector.
 * NOTE(review): garbled fragment -- braces, locals, most argument-block
 * assignments (original lines 4756-4762) and the return are missing.
 */
4750 VNOP_COPYFILE(struct vnode
*fvp
, struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
4751 int mode
, int flags
, vfs_context_t ctx
)
4754 struct vnop_copyfile_args a
;
4755 a
.a_desc
= &vnop_copyfile_desc
;
/* note: dispatch uses fvp (the source vnode), not tdvp/tvp */
4763 _err
= (*fvp
->v_op
[vnop_copyfile_desc
.vdesc_offset
])(&a
);
/*
 * VNOP_GETXATTR -- read an extended attribute via the vnode's
 * filesystem.
 * NOTE(review): garbled fragment; gaps in fused line numbers
 * (4770->4773, 4775->4780, ...) mark missing lines (braces, `error`
 * and `thread_safe` locals, remaining a.* assignments, the
 * `if (!thread_safe)` guards and the return).
 */
4768 VNOP_GETXATTR(vnode_t vp
, const char *name
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
4770 struct vnop_getxattr_args a
;
4773 int funnel_state
= 0;
4775 a
.a_desc
= &vnop_getxattr_desc
;
4780 a
.a_options
= options
;
4783 thread_safe
= THREAD_SAFE_FS(vp
);
/* presumably guarded by `if (!thread_safe)` in the original -- confirm */
4785 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4789 error
= (*vp
->v_op
[vnop_getxattr_desc
.vdesc_offset
])(&a
);
4791 unlock_fsnode(vp
, &funnel_state
);
/*
 * VNOP_SETXATTR -- write an extended attribute via the vnode's
 * filesystem, then invalidate cached authorization rights.
 * NOTE(review): garbled fragment; braces, locals, some a.* assignments,
 * the `if (!thread_safe)` guards and the return are missing.
 */
4797 VNOP_SETXATTR(vnode_t vp
, const char *name
, uio_t uio
, int options
, vfs_context_t ctx
)
4799 struct vnop_setxattr_args a
;
4802 int funnel_state
= 0;
4804 a
.a_desc
= &vnop_setxattr_desc
;
4808 a
.a_options
= options
;
4811 thread_safe
= THREAD_SAFE_FS(vp
);
/* presumably guarded by `if (!thread_safe)` in the original -- confirm */
4813 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4817 error
= (*vp
->v_op
[vnop_setxattr_desc
.vdesc_offset
])(&a
);
4819 unlock_fsnode(vp
, &funnel_state
);
/* drop cached kauth rights: setting an xattr may change what callers
 * are authorized to do with this vnode */
4822 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
/*
 * VNOP_REMOVEXATTR -- remove an extended attribute via the vnode's
 * filesystem.
 * NOTE(review): garbled fragment; braces, locals, some a.* assignments,
 * the `if (!thread_safe)` guards and the return are missing.
 */
4827 VNOP_REMOVEXATTR(vnode_t vp
, const char *name
, int options
, vfs_context_t ctx
)
4829 struct vnop_removexattr_args a
;
4832 int funnel_state
= 0;
4834 a
.a_desc
= &vnop_removexattr_desc
;
4837 a
.a_options
= options
;
4840 thread_safe
= THREAD_SAFE_FS(vp
);
/* presumably guarded by `if (!thread_safe)` in the original -- confirm */
4842 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4846 error
= (*vp
->v_op
[vnop_removexattr_desc
.vdesc_offset
])(&a
);
4848 unlock_fsnode(vp
, &funnel_state
);
/*
 * VNOP_LISTXATTR -- enumerate extended attributes via the vnode's
 * filesystem.
 * NOTE(review): garbled fragment; braces, locals, some a.* assignments,
 * the `if (!thread_safe)` guards and the return are missing.
 */
4854 VNOP_LISTXATTR(vnode_t vp
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
4856 struct vnop_listxattr_args a
;
4859 int funnel_state
= 0;
4861 a
.a_desc
= &vnop_listxattr_desc
;
4865 a
.a_options
= options
;
4868 thread_safe
= THREAD_SAFE_FS(vp
);
/* presumably guarded by `if (!thread_safe)` in the original -- confirm */
4870 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
4874 error
= (*vp
->v_op
[vnop_listxattr_desc
.vdesc_offset
])(&a
);
4876 unlock_fsnode(vp
, &funnel_state
);
4885 *#% blktooff vp = = =
/*
 * Argument block for the blktooff (logical block -> file offset)
 * operation.  NOTE(review): garbled fragment -- remaining members and
 * the closing `};` are missing from this chunk.
 */
4888 struct vnop_blktooff_args
{
4889 struct vnodeop_desc
*a_desc
;
/*
 * VNOP_BLKTOOFF -- translate a logical block number to a file offset
 * via the vnode's filesystem.
 * NOTE(review): garbled fragment; braces, `_err`/`thread_safe` locals,
 * the `if (!thread_safe)` guards around the funnel calls, and the
 * return are missing from this chunk.
 */
4896 VNOP_BLKTOOFF(struct vnode
*vp
, daddr64_t lblkno
, off_t
*offset
)
4899 struct vnop_blktooff_args a
;
4901 int funnel_state
= 0;
4903 a
.a_desc
= &vnop_blktooff_desc
;
4905 a
.a_lblkno
= lblkno
;
4906 a
.a_offset
= offset
;
4907 thread_safe
= THREAD_SAFE_FS(vp
);
/* funnel-era locking: take the kernel funnel for non-thread-safe
 * filesystems -- the guarding conditional is missing here; confirm */
4910 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4912 _err
= (*vp
->v_op
[vnop_blktooff_desc
.vdesc_offset
])(&a
);
4914 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4922 *#% offtoblk vp = = =
/*
 * Argument block for the offtoblk (file offset -> logical block)
 * operation.  NOTE(review): garbled fragment -- some members and the
 * closing `};` are missing from this chunk.
 */
4925 struct vnop_offtoblk_args
{
4926 struct vnodeop_desc
*a_desc
;
4929 daddr64_t
*a_lblkno
;
/*
 * VNOP_OFFTOBLK -- translate a file offset to a logical block number
 * via the vnode's filesystem.
 * NOTE(review): garbled fragment; braces, `_err`/`thread_safe` locals,
 * the guards around the funnel calls, and the return are missing.
 */
4933 VNOP_OFFTOBLK(struct vnode
*vp
, off_t offset
, daddr64_t
*lblkno
)
4936 struct vnop_offtoblk_args a
;
4938 int funnel_state
= 0;
4940 a
.a_desc
= &vnop_offtoblk_desc
;
4942 a
.a_offset
= offset
;
4943 a
.a_lblkno
= lblkno
;
4944 thread_safe
= THREAD_SAFE_FS(vp
);
/* funnel-era locking for non-thread-safe filesystems -- guarding
 * conditional missing from this fragment; confirm against upstream */
4947 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4949 _err
= (*vp
->v_op
[vnop_offtoblk_desc
.vdesc_offset
])(&a
);
4951 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4959 *#% blockmap vp L L L
/*
 * Argument block for the blockmap operation.
 * NOTE(review): garbled fragment -- members between a_desc and
 * a_context (original lines 4964-4970) and the closing `};` are
 * missing from this chunk.
 */
4962 struct vnop_blockmap_args
{
4963 struct vnodeop_desc
*a_desc
;
4971 vfs_context_t a_context
;
/*
 * VNOP_BLOCKMAP -- map a file range to device blocks via the vnode's
 * filesystem.
 * NOTE(review): garbled fragment; braces, locals, most a.* assignments,
 * the guards around the ctx fixup and funnel calls, and the return are
 * missing from this chunk.
 */
4975 VNOP_BLOCKMAP(struct vnode
*vp
, off_t foffset
, size_t size
, daddr64_t
*bpn
, size_t *run
, void *poff
, int flags
, vfs_context_t ctx
)
4978 struct vnop_blockmap_args a
;
4980 int funnel_state
= 0;
/* ctx is replaced with the current thread's context -- presumably
 * inside an `if (ctx == NULL)` guard missing from this fragment */
4983 ctx
= vfs_context_current();
4985 a
.a_desc
= &vnop_blockmap_desc
;
4987 a
.a_foffset
= foffset
;
4994 thread_safe
= THREAD_SAFE_FS(vp
);
/* funnel-era locking for non-thread-safe filesystems */
4997 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4999 _err
= (*vp
->v_op
[vnop_blockmap_desc
.vdesc_offset
])(&a
);
5001 (void) thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * Argument block for the strategy (block I/O) operation.
 * NOTE(review): garbled fragment -- remaining members and the closing
 * `};` are missing from this chunk.
 */
5007 struct vnop_strategy_args
{
5008 struct vnodeop_desc
*a_desc
;
/*
 * VNOP_STRATEGY -- issue a buffer I/O request; the target filesystem
 * is found through the buffer's vnode (buf_vnode(bp)).
 * NOTE(review): garbled fragment; braces, `int _err;`, the a_bp
 * assignment and the return are missing from this chunk.
 */
5014 VNOP_STRATEGY(struct buf
*bp
)
5017 struct vnop_strategy_args a
;
5018 a
.a_desc
= &vnop_strategy_desc
;
5020 _err
= (*buf_vnode(bp
)->v_op
[vnop_strategy_desc
.vdesc_offset
])(&a
);
/*
 * Argument block for the bwrite (buffer write) operation.
 * NOTE(review): garbled fragment -- remaining members and the closing
 * `};` are missing from this chunk.
 */
5025 struct vnop_bwrite_args
{
5026 struct vnodeop_desc
*a_desc
;
/*
 * VNOP_BWRITE -- write a buffer through the filesystem owning the
 * buffer's vnode (buf_vnode(bp)).
 * NOTE(review): garbled fragment; braces, `int _err;`, the a_bp
 * assignment and the return are missing from this chunk.
 */
5031 VNOP_BWRITE(struct buf
*bp
)
5034 struct vnop_bwrite_args a
;
5035 a
.a_desc
= &vnop_bwrite_desc
;
5037 _err
= (*buf_vnode(bp
)->v_op
[vnop_bwrite_desc
.vdesc_offset
])(&a
);
/*
 * Argument block for the kqfilt_add (kqueue filter attach) operation.
 * NOTE(review): garbled fragment -- members between a_desc and
 * a_context and the closing `};` are missing from this chunk.
 */
5042 struct vnop_kqfilt_add_args
{
5043 struct vnodeop_desc
*a_desc
;
5046 vfs_context_t a_context
;
/*
 * VNOP_KQFILT_ADD -- attach a knote to the vnode via the filesystem.
 * Uses the VDESC() macro (rather than a direct &..._desc reference)
 * to name the descriptor.
 * NOTE(review): garbled fragment; braces, locals, some a.* assignments,
 * the `if (!thread_safe)` guards and the return are missing.
 */
5050 VNOP_KQFILT_ADD(struct vnode
*vp
, struct knote
*kn
, vfs_context_t ctx
)
5053 struct vnop_kqfilt_add_args a
;
5055 int funnel_state
= 0;
5057 a
.a_desc
= VDESC(vnop_kqfilt_add
);
5061 thread_safe
= THREAD_SAFE_FS(vp
);
/* presumably guarded by `if (!thread_safe)` in the original -- confirm */
5064 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5068 _err
= (*vp
->v_op
[vnop_kqfilt_add_desc
.vdesc_offset
])(&a
);
5070 unlock_fsnode(vp
, &funnel_state
);
/*
 * Argument block for the kqfilt_remove (kqueue filter detach)
 * operation.  NOTE(review): garbled fragment -- members between a_desc
 * and a_context and the closing `};` are missing from this chunk.
 */
5076 struct vnop_kqfilt_remove_args
{
5077 struct vnodeop_desc
*a_desc
;
5080 vfs_context_t a_context
;
/*
 * VNOP_KQFILT_REMOVE -- detach a kqueue filter (identified by `ident`)
 * from the vnode via the filesystem.
 * NOTE(review): garbled fragment; braces, locals, some a.* assignments,
 * the `if (!thread_safe)` guards and the return are missing.
 */
5084 VNOP_KQFILT_REMOVE(struct vnode
*vp
, uintptr_t ident
, vfs_context_t ctx
)
5087 struct vnop_kqfilt_remove_args a
;
5089 int funnel_state
= 0;
5091 a
.a_desc
= VDESC(vnop_kqfilt_remove
);
5095 thread_safe
= THREAD_SAFE_FS(vp
);
/* presumably guarded by `if (!thread_safe)` in the original -- confirm */
5098 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5102 _err
= (*vp
->v_op
[vnop_kqfilt_remove_desc
.vdesc_offset
])(&a
);
5104 unlock_fsnode(vp
, &funnel_state
);
/*
 * Argument block for the setlabel (MAC label) operation.
 * NOTE(review): garbled fragment -- members between a_desc and
 * a_context and the closing `};` are missing from this chunk.
 */
5110 struct vnop_setlabel_args
{
5111 struct vnodeop_desc
*a_desc
;
5114 vfs_context_t a_context
;
/*
 * VNOP_SETLABEL -- apply a MAC security label to the vnode via the
 * filesystem.
 * NOTE(review): garbled fragment; braces, locals, some a.* assignments,
 * the `if (!thread_safe)` guards and the return are missing.
 */
5118 VNOP_SETLABEL(struct vnode
*vp
, struct label
*label
, vfs_context_t ctx
)
5121 struct vnop_setlabel_args a
;
5123 int funnel_state
= 0;
5125 a
.a_desc
= VDESC(vnop_setlabel
);
5129 thread_safe
= THREAD_SAFE_FS(vp
);
/* presumably guarded by `if (!thread_safe)` in the original -- confirm */
5132 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5136 _err
= (*vp
->v_op
[vnop_setlabel_desc
.vdesc_offset
])(&a
);
5138 unlock_fsnode(vp
, &funnel_state
);
5146 * Get a named stream
/*
 * VNOP_GETNAMEDSTREAM -- look up a named stream of vp, returning the
 * stream vnode through *svpp.  Dispatches directly (no funnel path);
 * the visible `if (!THREAD_SAFE_FS(vp))` has its body missing from
 * this fragment -- presumably an early error return; confirm.
 * NOTE(review): garbled fragment; braces and some a.* assignments
 * (original lines 5156-5158, 5160-5162) are missing.
 */
5149 VNOP_GETNAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, enum nsoperation operation
, int flags
, vfs_context_t ctx
)
5151 struct vnop_getnamedstream_args a
;
5153 if (!THREAD_SAFE_FS(vp
))
5155 a
.a_desc
= &vnop_getnamedstream_desc
;
5159 a
.a_operation
= operation
;
5163 return (*vp
->v_op
[vnop_getnamedstream_desc
.vdesc_offset
])(&a
);
5167 * Create a named stream
/*
 * VNOP_MAKENAMEDSTREAM -- create a named stream on vp, returning the
 * new stream vnode through *svpp.  Dispatches directly; the visible
 * `if (!THREAD_SAFE_FS(vp))` has its body missing from this fragment
 * -- presumably an early error return; confirm.
 * NOTE(review): garbled fragment; braces and the remaining a.*
 * assignments (original lines 5177-5182) are missing.
 */
5170 VNOP_MAKENAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, int flags
, vfs_context_t ctx
)
5172 struct vnop_makenamedstream_args a
;
5174 if (!THREAD_SAFE_FS(vp
))
5176 a
.a_desc
= &vnop_makenamedstream_desc
;
5183 return (*vp
->v_op
[vnop_makenamedstream_desc
.vdesc_offset
])(&a
);
5188 * Remove a named stream
5191 VNOP_REMOVENAMEDSTREAM(vnode_t vp
, vnode_t svp
, const char *name
, int flags
, vfs_context_t ctx
)
5193 struct vnop_removenamedstream_args a
;
5195 if (!THREAD_SAFE_FS(vp
))
5197 a
.a_desc
= &vnop_removenamedstream_desc
;
5204 return (*vp
->v_op
[vnop_removenamedstream_desc
.vdesc_offset
])(&a
);