/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * External virtual filesystem routines
 */
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
87 #include <sys/vnode_internal.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
92 #include <sys/errno.h>
93 #include <sys/malloc.h>
94 #include <sys/domain.h>
96 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <sys/filedesc.h>
101 #include <sys/event.h>
102 #include <sys/fsevents.h>
103 #include <sys/user.h>
104 #include <sys/lockf.h>
105 #include <sys/xattr.h>
107 #include <kern/assert.h>
108 #include <kern/kalloc.h>
109 #include <kern/task.h>
111 #include <libkern/OSByteOrder.h>
113 #include <miscfs/specfs/specdev.h>
115 #include <mach/mach_types.h>
116 #include <mach/memory_object_types.h>
117 #include <mach/task.h>
120 #include <security/mac_framework.h>
#if CONFIG_VFS_FUNNEL
/* Nonzero iff VP's filesystem is thread-safe (no v_unsafefs state => no funnel needed). */
#define THREAD_SAFE_FS(VP)  \
	((VP)->v_unsafefs ? 0 : 1)
#endif /* CONFIG_VFS_FUNNEL */
/* Nonzero iff VP's mount natively supports extended attributes (no AppleDouble shadow files). */
#define NATIVE_XATTR(VP)  \
	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
138 static void xattrfile_remove(vnode_t dvp
, const char *basename
,
139 vfs_context_t ctx
, int force
);
140 static void xattrfile_setattr(vnode_t dvp
, const char * basename
,
141 struct vnode_attr
* vap
, vfs_context_t ctx
);
144 * vnode_setneedinactive
146 * Description: Indicate that when the last iocount on this vnode goes away,
147 * and the usecount is also zero, we should inform the filesystem
150 * Parameters: vnode_t vnode to mark
154 * Notes: Notably used when we're deleting a file--we need not have a
155 * usecount, so VNOP_INACTIVE may not get called by anyone. We
156 * want it called when we drop our iocount.
159 vnode_setneedinactive(vnode_t vp
)
164 vp
->v_lflag
|= VL_NEEDINACTIVE
;
169 #if CONFIG_VFS_FUNNEL
171 lock_fsnode(vnode_t vp
, int *funnel_state
)
174 *funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
176 if (vp
->v_unsafefs
) {
177 if (vp
->v_unsafefs
->fsnodeowner
== current_thread()) {
178 vp
->v_unsafefs
->fsnode_count
++;
180 lck_mtx_lock(&vp
->v_unsafefs
->fsnodelock
);
182 if (vp
->v_lflag
& (VL_TERMWANT
| VL_TERMINATE
| VL_DEAD
)) {
183 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
186 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
189 vp
->v_unsafefs
->fsnodeowner
= current_thread();
190 vp
->v_unsafefs
->fsnode_count
= 1;
198 unlock_fsnode(vnode_t vp
, int *funnel_state
)
200 if (vp
->v_unsafefs
) {
201 if (--vp
->v_unsafefs
->fsnode_count
== 0) {
202 vp
->v_unsafefs
->fsnodeowner
= NULL
;
203 lck_mtx_unlock(&vp
->v_unsafefs
->fsnodelock
);
207 (void) thread_funnel_set(kernel_flock
, *funnel_state
);
209 #endif /* CONFIG_VFS_FUNNEL */
213 /* ====================================================================== */
214 /* ************ EXTERNAL KERNEL APIS ********************************** */
215 /* ====================================================================== */
/*
 * implementations of exported VFS operations
 */
221 VFS_MOUNT(mount_t mp
, vnode_t devvp
, user_addr_t data
, vfs_context_t ctx
)
224 #if CONFIG_VFS_FUNNEL
226 int funnel_state
= 0;
227 #endif /* CONFIG_VFS_FUNNEL */
229 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_mount
== 0))
232 #if CONFIG_VFS_FUNNEL
233 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
235 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
237 #endif /* CONFIG_VFS_FUNNEL */
239 if (vfs_context_is64bit(ctx
)) {
240 if (vfs_64bitready(mp
)) {
241 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
248 error
= (*mp
->mnt_op
->vfs_mount
)(mp
, devvp
, data
, ctx
);
251 #if CONFIG_VFS_FUNNEL
253 (void) thread_funnel_set(kernel_flock
, funnel_state
);
255 #endif /* CONFIG_VFS_FUNNEL */
261 VFS_START(mount_t mp
, int flags
, vfs_context_t ctx
)
264 #if CONFIG_VFS_FUNNEL
266 int funnel_state
= 0;
267 #endif /* CONFIG_VFS_FUNNEL */
269 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_start
== 0))
272 #if CONFIG_VFS_FUNNEL
273 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
275 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
277 #endif /* CONFIG_VFS_FUNNEL */
279 error
= (*mp
->mnt_op
->vfs_start
)(mp
, flags
, ctx
);
281 #if CONFIG_VFS_FUNNEL
283 (void) thread_funnel_set(kernel_flock
, funnel_state
);
285 #endif /* CONFIG_VFS_FUNNEL */
291 VFS_UNMOUNT(mount_t mp
, int flags
, vfs_context_t ctx
)
294 #if CONFIG_VFS_FUNNEL
296 int funnel_state
= 0;
297 #endif /* CONFIG_VFS_FUNNEL */
299 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_unmount
== 0))
302 #if CONFIG_VFS_FUNNEL
303 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
305 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
307 #endif /* CONFIG_VFS_FUNNEL */
309 error
= (*mp
->mnt_op
->vfs_unmount
)(mp
, flags
, ctx
);
311 #if CONFIG_VFS_FUNNEL
313 (void) thread_funnel_set(kernel_flock
, funnel_state
);
315 #endif /* CONFIG_VFS_FUNNEL */
322 * ENOTSUP Not supported
326 * Note: The return codes from the underlying VFS's root routine can't
327 * be fully enumerated here, since third party VFS authors may not
328 * limit their error returns to the ones documented here, even
329 * though this may result in some programs functioning incorrectly.
331 * The return codes documented above are those which may currently
332 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
333 * for a call to hfs_vget on the volume mount poit, not including
334 * additional error codes which may be propagated from underlying
335 * routines called by hfs_vget.
338 VFS_ROOT(mount_t mp
, struct vnode
** vpp
, vfs_context_t ctx
)
341 #if CONFIG_VFS_FUNNEL
343 int funnel_state
= 0;
344 #endif /* CONFIG_VFS_FUNNEL */
346 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_root
== 0))
350 ctx
= vfs_context_current();
353 #if CONFIG_VFS_FUNNEL
354 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
356 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
358 #endif /* CONFIG_VFS_FUNNEL */
360 error
= (*mp
->mnt_op
->vfs_root
)(mp
, vpp
, ctx
);
362 #if CONFIG_VFS_FUNNEL
364 (void) thread_funnel_set(kernel_flock
, funnel_state
);
366 #endif /* CONFIG_VFS_FUNNEL */
372 VFS_QUOTACTL(mount_t mp
, int cmd
, uid_t uid
, caddr_t datap
, vfs_context_t ctx
)
375 #if CONFIG_VFS_FUNNEL
377 int funnel_state
= 0;
378 #endif /* CONFIG_VFS_FUNNEL */
380 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_quotactl
== 0))
383 #if CONFIG_VFS_FUNNEL
384 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
386 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
388 #endif /* CONFIG_VFS_FUNNEL */
390 error
= (*mp
->mnt_op
->vfs_quotactl
)(mp
, cmd
, uid
, datap
, ctx
);
392 #if CONFIG_VFS_FUNNEL
394 (void) thread_funnel_set(kernel_flock
, funnel_state
);
396 #endif /* CONFIG_VFS_FUNNEL */
402 VFS_GETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
405 #if CONFIG_VFS_FUNNEL
407 int funnel_state
= 0;
408 #endif /* CONFIG_VFS_FUNNEL */
410 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_getattr
== 0))
414 ctx
= vfs_context_current();
417 #if CONFIG_VFS_FUNNEL
418 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
420 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
422 #endif /* CONFIG_VFS_FUNNEL */
424 error
= (*mp
->mnt_op
->vfs_getattr
)(mp
, vfa
, ctx
);
426 #if CONFIG_VFS_FUNNEL
428 (void) thread_funnel_set(kernel_flock
, funnel_state
);
430 #endif /* CONFIG_VFS_FUNNEL */
436 VFS_SETATTR(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
439 #if CONFIG_VFS_FUNNEL
441 int funnel_state
= 0;
442 #endif /* CONFIG_VFS_FUNNEL */
444 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_setattr
== 0))
448 ctx
= vfs_context_current();
451 #if CONFIG_VFS_FUNNEL
452 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
454 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
456 #endif /* CONFIG_VFS_FUNNEL */
458 error
= (*mp
->mnt_op
->vfs_setattr
)(mp
, vfa
, ctx
);
460 #if CONFIG_VFS_FUNNEL
462 (void) thread_funnel_set(kernel_flock
, funnel_state
);
464 #endif /* CONFIG_VFS_FUNNEL */
470 VFS_SYNC(mount_t mp
, int flags
, vfs_context_t ctx
)
473 #if CONFIG_VFS_FUNNEL
475 int funnel_state
= 0;
476 #endif /* CONFIG_VFS_FUNNEL */
478 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_sync
== 0))
482 ctx
= vfs_context_current();
485 #if CONFIG_VFS_FUNNEL
486 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
488 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
490 #endif /* CONFIG_VFS_FUNNEL */
492 error
= (*mp
->mnt_op
->vfs_sync
)(mp
, flags
, ctx
);
494 #if CONFIG_VFS_FUNNEL
496 (void) thread_funnel_set(kernel_flock
, funnel_state
);
498 #endif /* CONFIG_VFS_FUNNEL */
504 VFS_VGET(mount_t mp
, ino64_t ino
, struct vnode
**vpp
, vfs_context_t ctx
)
507 #if CONFIG_VFS_FUNNEL
509 int funnel_state
= 0;
510 #endif /* CONFIG_VFS_FUNNEL */
512 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_vget
== 0))
516 ctx
= vfs_context_current();
519 #if CONFIG_VFS_FUNNEL
520 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
522 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
524 #endif /* CONFIG_VFS_FUNNEL */
526 error
= (*mp
->mnt_op
->vfs_vget
)(mp
, ino
, vpp
, ctx
);
528 #if CONFIG_VFS_FUNNEL
530 (void) thread_funnel_set(kernel_flock
, funnel_state
);
532 #endif /* CONFIG_VFS_FUNNEL */
538 VFS_FHTOVP(mount_t mp
, int fhlen
, unsigned char * fhp
, vnode_t
* vpp
, vfs_context_t ctx
)
541 #if CONFIG_VFS_FUNNEL
543 int funnel_state
= 0;
544 #endif /* CONFIG_VFS_FUNNEL */
546 if ((mp
== dead_mountp
) || (mp
->mnt_op
->vfs_fhtovp
== 0))
550 ctx
= vfs_context_current();
553 #if CONFIG_VFS_FUNNEL
554 thread_safe
= (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSTHREADSAFE
);
556 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
558 #endif /* CONFIG_VFS_FUNNEL */
560 error
= (*mp
->mnt_op
->vfs_fhtovp
)(mp
, fhlen
, fhp
, vpp
, ctx
);
562 #if CONFIG_VFS_FUNNEL
564 (void) thread_funnel_set(kernel_flock
, funnel_state
);
566 #endif /* CONFIG_VFS_FUNNEL */
572 VFS_VPTOFH(struct vnode
* vp
, int *fhlenp
, unsigned char * fhp
, vfs_context_t ctx
)
575 #if CONFIG_VFS_FUNNEL
577 int funnel_state
= 0;
578 #endif /* CONFIG_VFS_FUNNEL */
580 if ((vp
->v_mount
== dead_mountp
) || (vp
->v_mount
->mnt_op
->vfs_vptofh
== 0))
584 ctx
= vfs_context_current();
587 #if CONFIG_VFS_FUNNEL
588 thread_safe
= THREAD_SAFE_FS(vp
);
590 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
592 #endif /* CONFIG_VFS_FUNNEL */
594 error
= (*vp
->v_mount
->mnt_op
->vfs_vptofh
)(vp
, fhlenp
, fhp
, ctx
);
596 #if CONFIG_VFS_FUNNEL
598 (void) thread_funnel_set(kernel_flock
, funnel_state
);
600 #endif /* CONFIG_VFS_FUNNEL */
606 /* returns the cached throttle mask for the mount_t */
608 vfs_throttle_mask(mount_t mp
)
610 return(mp
->mnt_throttle_mask
);
613 /* returns a copy of vfs type name for the mount_t */
615 vfs_name(mount_t mp
, char * buffer
)
617 strncpy(buffer
, mp
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
620 /* returns vfs type number for the mount_t */
622 vfs_typenum(mount_t mp
)
624 return(mp
->mnt_vtable
->vfc_typenum
);
627 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
629 vfs_mntlabel(mount_t mp
)
631 return (void*)mp
->mnt_mntlabel
;
634 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
636 vfs_flags(mount_t mp
)
638 return((uint64_t)(mp
->mnt_flag
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
)));
641 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
643 vfs_setflags(mount_t mp
, uint64_t flags
)
645 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
648 mp
->mnt_flag
|= lflags
;
652 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
654 vfs_clearflags(mount_t mp
, uint64_t flags
)
656 uint32_t lflags
= (uint32_t)(flags
& (MNT_CMDFLAGS
| MNT_VISFLAGMASK
));
659 mp
->mnt_flag
&= ~lflags
;
663 /* Is the mount_t ronly and upgrade read/write requested? */
665 vfs_iswriteupgrade(mount_t mp
) /* ronly && MNTK_WANTRDWR */
667 return ((mp
->mnt_flag
& MNT_RDONLY
) && (mp
->mnt_kern_flag
& MNTK_WANTRDWR
));
671 /* Is the mount_t mounted ronly */
673 vfs_isrdonly(mount_t mp
)
675 return (mp
->mnt_flag
& MNT_RDONLY
);
678 /* Is the mount_t mounted for filesystem synchronous writes? */
680 vfs_issynchronous(mount_t mp
)
682 return (mp
->mnt_flag
& MNT_SYNCHRONOUS
);
685 /* Is the mount_t mounted read/write? */
687 vfs_isrdwr(mount_t mp
)
689 return ((mp
->mnt_flag
& MNT_RDONLY
) == 0);
693 /* Is mount_t marked for update (ie MNT_UPDATE) */
695 vfs_isupdate(mount_t mp
)
697 return (mp
->mnt_flag
& MNT_UPDATE
);
701 /* Is mount_t marked for reload (ie MNT_RELOAD) */
703 vfs_isreload(mount_t mp
)
705 return ((mp
->mnt_flag
& MNT_UPDATE
) && (mp
->mnt_flag
& MNT_RELOAD
));
708 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
710 vfs_isforce(mount_t mp
)
712 if ((mp
->mnt_lflag
& MNT_LFORCE
) || (mp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
))
719 vfs_isunmount(mount_t mp
)
721 if ((mp
->mnt_lflag
& MNT_LUNMOUNT
)) {
729 vfs_64bitready(mount_t mp
)
731 if ((mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFS64BITREADY
))
739 vfs_authcache_ttl(mount_t mp
)
741 if ( (mp
->mnt_kern_flag
& (MNTK_AUTH_OPAQUE
| MNTK_AUTH_CACHE_TTL
)) )
742 return (mp
->mnt_authcache_ttl
);
744 return (CACHED_RIGHT_INFINITE_TTL
);
748 vfs_setauthcache_ttl(mount_t mp
, int ttl
)
751 mp
->mnt_kern_flag
|= MNTK_AUTH_CACHE_TTL
;
752 mp
->mnt_authcache_ttl
= ttl
;
757 vfs_clearauthcache_ttl(mount_t mp
)
760 mp
->mnt_kern_flag
&= ~MNTK_AUTH_CACHE_TTL
;
762 * back to the default TTL value in case
763 * MNTK_AUTH_OPAQUE is set on this mount
765 mp
->mnt_authcache_ttl
= CACHED_LOOKUP_RIGHT_TTL
;
770 vfs_markdependency(mount_t mp
)
772 proc_t p
= current_proc();
774 mp
->mnt_dependent_process
= p
;
775 mp
->mnt_dependent_pid
= proc_pid(p
);
781 vfs_authopaque(mount_t mp
)
783 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
))
790 vfs_authopaqueaccess(mount_t mp
)
792 if ((mp
->mnt_kern_flag
& MNTK_AUTH_OPAQUE_ACCESS
))
799 vfs_setauthopaque(mount_t mp
)
802 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE
;
807 vfs_setauthopaqueaccess(mount_t mp
)
810 mp
->mnt_kern_flag
|= MNTK_AUTH_OPAQUE_ACCESS
;
815 vfs_clearauthopaque(mount_t mp
)
818 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE
;
823 vfs_clearauthopaqueaccess(mount_t mp
)
826 mp
->mnt_kern_flag
&= ~MNTK_AUTH_OPAQUE_ACCESS
;
831 vfs_setextendedsecurity(mount_t mp
)
834 mp
->mnt_kern_flag
|= MNTK_EXTENDED_SECURITY
;
839 vfs_clearextendedsecurity(mount_t mp
)
842 mp
->mnt_kern_flag
&= ~MNTK_EXTENDED_SECURITY
;
847 vfs_extendedsecurity(mount_t mp
)
849 return(mp
->mnt_kern_flag
& MNTK_EXTENDED_SECURITY
);
852 /* returns the max size of short symlink in this mount_t */
854 vfs_maxsymlen(mount_t mp
)
856 return(mp
->mnt_maxsymlinklen
);
859 /* set max size of short symlink on mount_t */
861 vfs_setmaxsymlen(mount_t mp
, uint32_t symlen
)
863 mp
->mnt_maxsymlinklen
= symlen
;
866 /* return a pointer to the RO vfs_statfs associated with mount_t */
868 vfs_statfs(mount_t mp
)
870 return(&mp
->mnt_vfsstat
);
874 vfs_getattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
878 if ((error
= VFS_GETATTR(mp
, vfa
, ctx
)) != 0)
882 * If we have a filesystem create time, use it to default some others.
884 if (VFSATTR_IS_SUPPORTED(vfa
, f_create_time
)) {
885 if (VFSATTR_IS_ACTIVE(vfa
, f_modify_time
) && !VFSATTR_IS_SUPPORTED(vfa
, f_modify_time
))
886 VFSATTR_RETURN(vfa
, f_modify_time
, vfa
->f_create_time
);
893 vfs_setattr(mount_t mp
, struct vfs_attr
*vfa
, vfs_context_t ctx
)
897 if (vfs_isrdonly(mp
))
900 error
= VFS_SETATTR(mp
, vfa
, ctx
);
903 * If we had alternate ways of setting vfs attributes, we'd
910 /* return the private data handle stored in mount_t */
912 vfs_fsprivate(mount_t mp
)
914 return(mp
->mnt_data
);
917 /* set the private data handle in mount_t */
919 vfs_setfsprivate(mount_t mp
, void *mntdata
)
922 mp
->mnt_data
= mntdata
;
928 * return the block size of the underlying
929 * device associated with mount_t
932 vfs_devblocksize(mount_t mp
) {
934 return(mp
->mnt_devblocksize
);
938 * Returns vnode with an iocount that must be released with vnode_put()
941 vfs_vnodecovered(mount_t mp
)
943 vnode_t vp
= mp
->mnt_vnodecovered
;
944 if ((vp
== NULL
) || (vnode_getwithref(vp
) != 0)) {
952 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
953 * The iocount must be released with vnode_put(). Note that this KPI is subtle
954 * with respect to the validity of using this device vnode for anything substantial
955 * (which is discouraged). If commands are sent to the device driver without
956 * taking proper steps to ensure that the device is still open, chaos may ensue.
957 * Similarly, this routine should only be called if there is some guarantee that
958 * the mount itself is still valid.
961 vfs_devvp(mount_t mp
)
963 vnode_t vp
= mp
->mnt_devvp
;
965 if ((vp
!= NULLVP
) && (vnode_get(vp
) == 0)) {
973 * return the io attributes associated with mount_t
976 vfs_ioattr(mount_t mp
, struct vfsioattr
*ioattrp
)
979 ioattrp
->io_maxreadcnt
= MAXPHYS
;
980 ioattrp
->io_maxwritecnt
= MAXPHYS
;
981 ioattrp
->io_segreadcnt
= 32;
982 ioattrp
->io_segwritecnt
= 32;
983 ioattrp
->io_maxsegreadsize
= MAXPHYS
;
984 ioattrp
->io_maxsegwritesize
= MAXPHYS
;
985 ioattrp
->io_devblocksize
= DEV_BSIZE
;
986 ioattrp
->io_flags
= 0;
988 ioattrp
->io_maxreadcnt
= mp
->mnt_maxreadcnt
;
989 ioattrp
->io_maxwritecnt
= mp
->mnt_maxwritecnt
;
990 ioattrp
->io_segreadcnt
= mp
->mnt_segreadcnt
;
991 ioattrp
->io_segwritecnt
= mp
->mnt_segwritecnt
;
992 ioattrp
->io_maxsegreadsize
= mp
->mnt_maxsegreadsize
;
993 ioattrp
->io_maxsegwritesize
= mp
->mnt_maxsegwritesize
;
994 ioattrp
->io_devblocksize
= mp
->mnt_devblocksize
;
995 ioattrp
->io_flags
= mp
->mnt_ioflags
;
997 ioattrp
->io_reserved
[0] = NULL
;
998 ioattrp
->io_reserved
[1] = NULL
;
1003 * set the IO attributes associated with mount_t
1006 vfs_setioattr(mount_t mp
, struct vfsioattr
* ioattrp
)
1010 mp
->mnt_maxreadcnt
= ioattrp
->io_maxreadcnt
;
1011 mp
->mnt_maxwritecnt
= ioattrp
->io_maxwritecnt
;
1012 mp
->mnt_segreadcnt
= ioattrp
->io_segreadcnt
;
1013 mp
->mnt_segwritecnt
= ioattrp
->io_segwritecnt
;
1014 mp
->mnt_maxsegreadsize
= ioattrp
->io_maxsegreadsize
;
1015 mp
->mnt_maxsegwritesize
= ioattrp
->io_maxsegwritesize
;
1016 mp
->mnt_devblocksize
= ioattrp
->io_devblocksize
;
1017 mp
->mnt_ioflags
= ioattrp
->io_flags
;
1021 * Add a new filesystem into the kernel specified in passed in
1022 * vfstable structure. It fills in the vnode
1023 * dispatch vector that is to be passed to when vnodes are created.
1024 * It returns a handle which is to be used to when the FS is to be removed
1026 typedef int (*PFI
)(void *);
1027 extern int vfs_opv_numops
;
1029 vfs_fsadd(struct vfs_fsentry
*vfe
, vfstable_t
* handle
)
1031 struct vfstable
*newvfstbl
= NULL
;
1033 int (***opv_desc_vector_p
)(void *);
1034 int (**opv_desc_vector
)(void *);
1035 struct vnodeopv_entry_desc
*opve_descp
;
1041 * This routine is responsible for all the initialization that would
1042 * ordinarily be done as part of the system startup;
1045 if (vfe
== (struct vfs_fsentry
*)0)
1048 desccount
= vfe
->vfe_vopcnt
;
1049 if ((desccount
<=0) || ((desccount
> 8)) || (vfe
->vfe_vfsops
== (struct vfsops
*)NULL
)
1050 || (vfe
->vfe_opvdescs
== (struct vnodeopv_desc
**)NULL
))
1053 #if !CONFIG_VFS_FUNNEL
1054 /* Non-threadsafe filesystems are not supported e.g. on K64 & iOS */
1055 if ((vfe
->vfe_flags
& (VFS_TBLTHREADSAFE
| VFS_TBLFSNODELOCK
)) == 0) {
1058 #endif /* !CONFIG_VFS_FUNNEL */
1060 MALLOC(newvfstbl
, void *, sizeof(struct vfstable
), M_TEMP
,
1062 bzero(newvfstbl
, sizeof(struct vfstable
));
1063 newvfstbl
->vfc_vfsops
= vfe
->vfe_vfsops
;
1064 strncpy(&newvfstbl
->vfc_name
[0], vfe
->vfe_fsname
, MFSNAMELEN
);
1065 if ((vfe
->vfe_flags
& VFS_TBLNOTYPENUM
))
1066 newvfstbl
->vfc_typenum
= maxvfsconf
++;
1068 newvfstbl
->vfc_typenum
= vfe
->vfe_fstypenum
;
1070 newvfstbl
->vfc_refcount
= 0;
1071 newvfstbl
->vfc_flags
= 0;
1072 newvfstbl
->vfc_mountroot
= NULL
;
1073 newvfstbl
->vfc_next
= NULL
;
1074 newvfstbl
->vfc_vfsflags
= 0;
1075 if (vfe
->vfe_flags
& VFS_TBL64BITREADY
)
1076 newvfstbl
->vfc_vfsflags
|= VFC_VFS64BITREADY
;
1077 if (vfe
->vfe_flags
& VFS_TBLVNOP_PAGEINV2
)
1078 newvfstbl
->vfc_vfsflags
|= VFC_VFSVNOP_PAGEINV2
;
1079 if (vfe
->vfe_flags
& VFS_TBLVNOP_PAGEOUTV2
)
1080 newvfstbl
->vfc_vfsflags
|= VFC_VFSVNOP_PAGEOUTV2
;
1081 #if CONFIG_VFS_FUNNEL
1082 if (vfe
->vfe_flags
& VFS_TBLTHREADSAFE
)
1083 newvfstbl
->vfc_vfsflags
|= VFC_VFSTHREADSAFE
;
1084 if (vfe
->vfe_flags
& VFS_TBLFSNODELOCK
)
1085 newvfstbl
->vfc_vfsflags
|= VFC_VFSTHREADSAFE
;
1086 #endif /* CONFIG_VFS_FUNNEL */
1087 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) == VFS_TBLLOCALVOL
)
1088 newvfstbl
->vfc_flags
|= MNT_LOCAL
;
1089 if ((vfe
->vfe_flags
& VFS_TBLLOCALVOL
) && (vfe
->vfe_flags
& VFS_TBLGENERICMNTARGS
) == 0)
1090 newvfstbl
->vfc_vfsflags
|= VFC_VFSLOCALARGS
;
1092 newvfstbl
->vfc_vfsflags
|= VFC_VFSGENERICARGS
;
1094 if (vfe
->vfe_flags
& VFS_TBLNATIVEXATTR
)
1095 newvfstbl
->vfc_vfsflags
|= VFC_VFSNATIVEXATTR
;
1096 if (vfe
->vfe_flags
& VFS_TBLUNMOUNT_PREFLIGHT
)
1097 newvfstbl
->vfc_vfsflags
|= VFC_VFSPREFLIGHT
;
1098 if (vfe
->vfe_flags
& VFS_TBLREADDIR_EXTENDED
)
1099 newvfstbl
->vfc_vfsflags
|= VFC_VFSREADDIR_EXTENDED
;
1100 if (vfe
->vfe_flags
& VFS_TBLNOMACLABEL
)
1101 newvfstbl
->vfc_vfsflags
|= VFC_VFSNOMACLABEL
;
1104 * Allocate and init the vectors.
1105 * Also handle backwards compatibility.
1107 * We allocate one large block to hold all <desccount>
1108 * vnode operation vectors stored contiguously.
1110 /* XXX - shouldn't be M_TEMP */
1112 descsize
= desccount
* vfs_opv_numops
* sizeof(PFI
);
1113 MALLOC(descptr
, PFI
*, descsize
,
1115 bzero(descptr
, descsize
);
1117 newvfstbl
->vfc_descptr
= descptr
;
1118 newvfstbl
->vfc_descsize
= descsize
;
1121 for (i
= 0; i
< desccount
; i
++ ) {
1122 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
1124 * Fill in the caller's pointer to the start of the i'th vector.
1125 * They'll need to supply it when calling vnode_create.
1127 opv_desc_vector
= descptr
+ i
* vfs_opv_numops
;
1128 *opv_desc_vector_p
= opv_desc_vector
;
1130 for (j
= 0; vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
1131 opve_descp
= &(vfe
->vfe_opvdescs
[i
]->opv_desc_ops
[j
]);
1134 * Sanity check: is this operation listed
1135 * in the list of operations? We check this
1136 * by seeing if its offset is zero. Since
1137 * the default routine should always be listed
1138 * first, it should be the only one with a zero
1139 * offset. Any other operation with a zero
1140 * offset is probably not listed in
1141 * vfs_op_descs, and so is probably an error.
1143 * A panic here means the layer programmer
1144 * has committed the all-too common bug
1145 * of adding a new operation to the layer's
1146 * list of vnode operations but
1147 * not adding the operation to the system-wide
1148 * list of supported operations.
1150 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
1151 opve_descp
->opve_op
->vdesc_offset
!= VOFFSET(vnop_default
)) {
1152 printf("vfs_fsadd: operation %s not listed in %s.\n",
1153 opve_descp
->opve_op
->vdesc_name
,
1155 panic("vfs_fsadd: bad operation");
1158 * Fill in this entry.
1160 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
1161 opve_descp
->opve_impl
;
1166 * Finally, go back and replace unfilled routines
1167 * with their default. (Sigh, an O(n^3) algorithm. I
1168 * could make it better, but that'd be work, and n is small.)
1170 opv_desc_vector_p
= vfe
->vfe_opvdescs
[i
]->opv_desc_vector_p
;
1173 * Force every operations vector to have a default routine.
1175 opv_desc_vector
= *opv_desc_vector_p
;
1176 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
)
1177 panic("vfs_fsadd: operation vector without default routine.");
1178 for (j
= 0; j
< vfs_opv_numops
; j
++)
1179 if (opv_desc_vector
[j
] == NULL
)
1180 opv_desc_vector
[j
] =
1181 opv_desc_vector
[VOFFSET(vnop_default
)];
1183 } /* end of each vnodeopv_desc parsing */
1187 *handle
= vfstable_add(newvfstbl
);
1189 if (newvfstbl
->vfc_typenum
<= maxvfsconf
)
1190 maxvfsconf
= newvfstbl
->vfc_typenum
+ 1;
1192 if (newvfstbl
->vfc_vfsops
->vfs_init
) {
1193 struct vfsconf vfsc
;
1194 bzero(&vfsc
, sizeof(struct vfsconf
));
1195 vfsc
.vfc_reserved1
= 0;
1196 bcopy((*handle
)->vfc_name
, vfsc
.vfc_name
, sizeof(vfsc
.vfc_name
));
1197 vfsc
.vfc_typenum
= (*handle
)->vfc_typenum
;
1198 vfsc
.vfc_refcount
= (*handle
)->vfc_refcount
;
1199 vfsc
.vfc_flags
= (*handle
)->vfc_flags
;
1200 vfsc
.vfc_reserved2
= 0;
1201 vfsc
.vfc_reserved3
= 0;
1203 (*newvfstbl
->vfc_vfsops
->vfs_init
)(&vfsc
);
1206 FREE(newvfstbl
, M_TEMP
);
1212 * Removes the filesystem from kernel.
1213 * The argument passed in is the handle that was given when
1214 * file system was added
1217 vfs_fsremove(vfstable_t handle
)
1219 struct vfstable
* vfstbl
= (struct vfstable
*)handle
;
1220 void *old_desc
= NULL
;
1223 /* Preflight check for any mounts */
1225 if ( vfstbl
->vfc_refcount
!= 0 ) {
1226 mount_list_unlock();
1231 * save the old descriptor; the free cannot occur unconditionally,
1232 * since vfstable_del() may fail.
1234 if (vfstbl
->vfc_descptr
&& vfstbl
->vfc_descsize
) {
1235 old_desc
= vfstbl
->vfc_descptr
;
1237 err
= vfstable_del(vfstbl
);
1239 mount_list_unlock();
1241 /* free the descriptor if the delete was successful */
1242 if (err
== 0 && old_desc
) {
1243 FREE(old_desc
, M_TEMP
);
1250 vfs_context_pid(vfs_context_t ctx
)
1252 return (proc_pid(vfs_context_proc(ctx
)));
1256 vfs_context_suser(vfs_context_t ctx
)
1258 return (suser(ctx
->vc_ucred
, NULL
));
1262 * Return bit field of signals posted to all threads in the context's process.
1264 * XXX Signals should be tied to threads, not processes, for most uses of this
1268 vfs_context_issignal(vfs_context_t ctx
, sigset_t mask
)
1270 proc_t p
= vfs_context_proc(ctx
);
1272 return(proc_pendingsignals(p
, mask
));
1277 vfs_context_is64bit(vfs_context_t ctx
)
1279 proc_t proc
= vfs_context_proc(ctx
);
1282 return(proc_is64bit(proc
));
1290 * Description: Given a vfs_context_t, return the proc_t associated with it.
1292 * Parameters: vfs_context_t The context to use
1294 * Returns: proc_t The process for this context
1296 * Notes: This function will return the current_proc() if any of the
1297 * following conditions are true:
1299 * o The supplied context pointer is NULL
1300 * o There is no Mach thread associated with the context
1301 * o There is no Mach task associated with the Mach thread
1302 * o There is no proc_t associated with the Mach task
1303 * o The proc_t has no per process open file table
1304 * o The proc_t is post-vfork()
1306 * This causes this function to return a value matching as
1307 * closely as possible the previous behaviour, while at the
1308 * same time avoiding the task lending that results from vfork()
1311 vfs_context_proc(vfs_context_t ctx
)
1315 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1316 proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
);
1317 if (proc
!= NULL
&& (proc
->p_fd
== NULL
|| (proc
->p_lflag
& P_LVFORK
)))
1320 return(proc
== NULL
? current_proc() : proc
);
1324 * vfs_context_get_special_port
1326 * Description: Return the requested special port from the task associated
1327 * with the given context.
1329 * Parameters: vfs_context_t The context to use
1330 * int Index of special port
1331 * ipc_port_t * Pointer to returned port
1333 * Returns: kern_return_t see task_get_special_port()
1336 vfs_context_get_special_port(vfs_context_t ctx
, int which
, ipc_port_t
*portp
)
1340 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1341 task
= get_threadtask(ctx
->vc_thread
);
1343 return task_get_special_port(task
, which
, portp
);
1347 * vfs_context_set_special_port
1349 * Description: Set the requested special port in the task associated
1350 * with the given context.
1352 * Parameters: vfs_context_t The context to use
1353 * int Index of special port
1354 * ipc_port_t New special port
1356 * Returns: kern_return_t see task_set_special_port()
1359 vfs_context_set_special_port(vfs_context_t ctx
, int which
, ipc_port_t port
)
1363 if (ctx
!= NULL
&& ctx
->vc_thread
!= NULL
)
1364 task
= get_threadtask(ctx
->vc_thread
);
1366 return task_set_special_port(task
, which
, port
);
1370 * vfs_context_thread
1372 * Description: Return the Mach thread associated with a vfs_context_t
1374 * Parameters: vfs_context_t The context to use
1376 * Returns: thread_t The thread for this context, or
1377 * NULL, if there is not one.
1379 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1380 * as a result of a static vfs_context_t declaration in a function
1381 * and will result in this function returning NULL.
1383 * This is intentional; this function should NOT return the
1384 * current_thread() in this case.
1387 vfs_context_thread(vfs_context_t ctx
)
1389 return(ctx
->vc_thread
);
1396 * Description: Returns a reference on the vnode for the current working
1397 * directory for the supplied context
1399 * Parameters: vfs_context_t The context to use
1401 * Returns: vnode_t The current working directory
1404 * Notes: The function first attempts to obtain the current directory
1405 * from the thread, and if it is not present there, falls back
1406 * to obtaining it from the process instead. If it can't be
1407 * obtained from either place, we return NULLVP.
1410 vfs_context_cwd(vfs_context_t ctx
)
1412 vnode_t cwd
= NULLVP
;
1414 if(ctx
!= NULL
&& ctx
->vc_thread
!= NULL
) {
1415 uthread_t uth
= get_bsdthread_info(ctx
->vc_thread
);
1419 * Get the cwd from the thread; if there isn't one, get it
1420 * from the process, instead.
1422 if ((cwd
= uth
->uu_cdir
) == NULLVP
&&
1423 (proc
= (proc_t
)get_bsdthreadtask_info(ctx
->vc_thread
)) != NULL
&&
1425 cwd
= proc
->p_fd
->fd_cdir
;
1432 * vfs_context_create
1434 * Description: Allocate and initialize a new context.
1436 * Parameters: vfs_context_t: Context to copy, or NULL for new
1438 * Returns: Pointer to new context
1440 * Notes: Copy cred and thread from argument, if available; else
1441 * initialize with current thread and new cred. Returns
1442 * with a reference held on the credential.
1445 vfs_context_create(vfs_context_t ctx
)
1447 vfs_context_t newcontext
;
1449 newcontext
= (vfs_context_t
)kalloc(sizeof(struct vfs_context
));
1452 kauth_cred_t safecred
;
1454 newcontext
->vc_thread
= ctx
->vc_thread
;
1455 safecred
= ctx
->vc_ucred
;
1457 newcontext
->vc_thread
= current_thread();
1458 safecred
= kauth_cred_get();
1460 if (IS_VALID_CRED(safecred
))
1461 kauth_cred_ref(safecred
);
1462 newcontext
->vc_ucred
= safecred
;
1470 vfs_context_current(void)
1472 vfs_context_t ctx
= NULL
;
1473 volatile uthread_t ut
= (uthread_t
)get_bsdthread_info(current_thread());
1476 if (ut
->uu_context
.vc_ucred
!= NULL
) {
1477 ctx
= &ut
->uu_context
;
1481 return(ctx
== NULL
? vfs_context_kernel() : ctx
);
1488 * Dangerous hack - adopt the first kernel thread as the current thread, to
1489 * get to the vfs_context_t in the uthread associated with a kernel thread.
1490 * This is used by UDF to make the call into IOCDMediaBSDClient,
1491 * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the
1492 * ioctl() is being called from kernel or user space (and all this because
1493 * we do not pass threads into our ioctl()'s, instead of processes).
1495 * This is also used by imageboot_setup(), called early from bsd_init() after
1496 * kernproc has been given a credential.
1498 * Note: The use of proc_thread() here is a convenience to avoid inclusion
1499 * of many Mach headers to do the reference directly rather than indirectly;
1500 * we will need to forego this convenience when we reture proc_thread().
1502 static struct vfs_context kerncontext
;
1504 vfs_context_kernel(void)
1506 if (kerncontext
.vc_ucred
== NOCRED
)
1507 kerncontext
.vc_ucred
= kernproc
->p_ucred
;
1508 if (kerncontext
.vc_thread
== NULL
)
1509 kerncontext
.vc_thread
= proc_thread(kernproc
);
1511 return(&kerncontext
);
1516 vfs_context_rele(vfs_context_t ctx
)
1519 if (IS_VALID_CRED(ctx
->vc_ucred
))
1520 kauth_cred_unref(&ctx
->vc_ucred
);
1521 kfree(ctx
, sizeof(struct vfs_context
));
1528 vfs_context_ucred(vfs_context_t ctx
)
1530 return (ctx
->vc_ucred
);
1534 * Return true if the context is owned by the superuser.
1537 vfs_context_issuser(vfs_context_t ctx
)
1539 return(kauth_cred_issuser(vfs_context_ucred(ctx
)));
1543 * Given a context, for all fields of vfs_context_t which
1544 * are not held with a reference, set those fields to the
1545 * values for the current execution context. Currently, this
1546 * just means the vc_thread.
1548 * Returns: 0 for success, nonzero for failure
1550 * The intended use is:
1551 * 1. vfs_context_create() gets the caller a context
1552 * 2. vfs_context_bind() sets the unrefcounted data
1553 * 3. vfs_context_rele() releases the context
1557 vfs_context_bind(vfs_context_t ctx
)
1559 ctx
->vc_thread
= current_thread();
1563 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1567 * Convert between vnode types and inode formats (since POSIX.1
1568 * defines mode word of stat structure in terms of inode formats).
1571 vnode_iftovt(int mode
)
1573 return(iftovt_tab
[((mode
) & S_IFMT
) >> 12]);
1577 vnode_vttoif(enum vtype indx
)
1579 return(vttoif_tab
[(int)(indx
)]);
/* Compose an inode mode word from a vnode type and permission bits. */
int
vnode_makeimode(int indx, int mode)
{
	return (int)(VTTOIF(indx) | (mode));
}
1590 * vnode manipulation functions.
1593 /* returns system root vnode iocount; It should be released using vnode_put() */
1599 error
= vnode_get(rootvnode
);
1601 return ((vnode_t
)0);
1608 vnode_vid(vnode_t vp
)
1610 return ((uint32_t)(vp
->v_id
));
1614 vnode_mount(vnode_t vp
)
1616 return (vp
->v_mount
);
1620 vnode_mountedhere(vnode_t vp
)
1624 if ((vp
->v_type
== VDIR
) && ((mp
= vp
->v_mountedhere
) != NULL
) &&
1625 (mp
->mnt_vnodecovered
== vp
))
1628 return (mount_t
)NULL
;
1631 /* returns vnode type of vnode_t */
1633 vnode_vtype(vnode_t vp
)
1635 return (vp
->v_type
);
1638 /* returns FS specific node saved in vnode */
1640 vnode_fsnode(vnode_t vp
)
1642 return (vp
->v_data
);
1646 vnode_clearfsnode(vnode_t vp
)
1652 vnode_specrdev(vnode_t vp
)
1658 /* Accessor functions */
1659 /* is vnode_t a root vnode */
1661 vnode_isvroot(vnode_t vp
)
1663 return ((vp
->v_flag
& VROOT
)? 1 : 0);
1666 /* is vnode_t a system vnode */
1668 vnode_issystem(vnode_t vp
)
1670 return ((vp
->v_flag
& VSYSTEM
)? 1 : 0);
1673 /* is vnode_t a swap file vnode */
1675 vnode_isswap(vnode_t vp
)
1677 return ((vp
->v_flag
& VSWAP
)? 1 : 0);
1680 /* is vnode_t a tty */
1682 vnode_istty(vnode_t vp
)
1684 return ((vp
->v_flag
& VISTTY
) ? 1 : 0);
1687 /* if vnode_t mount operation in progress */
1689 vnode_ismount(vnode_t vp
)
1691 return ((vp
->v_flag
& VMOUNT
)? 1 : 0);
1694 /* is this vnode under recyle now */
1696 vnode_isrecycled(vnode_t vp
)
1700 vnode_lock_spin(vp
);
1701 ret
= (vp
->v_lflag
& (VL_TERMINATE
|VL_DEAD
))? 1 : 0;
1706 /* vnode was created by background task requesting rapid aging
1707 and has not since been referenced by a normal task */
1709 vnode_israge(vnode_t vp
)
1711 return ((vp
->v_flag
& VRAGE
)? 1 : 0);
1715 vnode_needssnapshots(vnode_t vp
)
1717 return ((vp
->v_flag
& VNEEDSSNAPSHOT
)? 1 : 0);
1721 /* Check the process/thread to see if we should skip atime updates */
1723 vfs_ctx_skipatime (vfs_context_t ctx
) {
1728 proc
= vfs_context_proc(ctx
);
1729 thr
= vfs_context_thread (ctx
);
1731 /* Validate pointers in case we were invoked via a kernel context */
1733 ut
= get_bsdthread_info (thr
);
1735 if (proc
->p_lflag
& P_LRAGE_VNODES
) {
1740 if (ut
->uu_flag
& UT_RAGE_VNODES
) {
1748 /* is vnode_t marked to not keep data cached once it's been consumed */
1750 vnode_isnocache(vnode_t vp
)
1752 return ((vp
->v_flag
& VNOCACHE_DATA
)? 1 : 0);
1756 * has sequential readahead been disabled on this vnode
1759 vnode_isnoreadahead(vnode_t vp
)
1761 return ((vp
->v_flag
& VRAOFF
)? 1 : 0);
1765 vnode_is_openevt(vnode_t vp
)
1767 return ((vp
->v_flag
& VOPENEVT
)? 1 : 0);
1770 /* is vnode_t a standard one? */
1772 vnode_isstandard(vnode_t vp
)
1774 return ((vp
->v_flag
& VSTANDARD
)? 1 : 0);
1777 /* don't vflush() if SKIPSYSTEM */
1779 vnode_isnoflush(vnode_t vp
)
1781 return ((vp
->v_flag
& VNOFLUSH
)? 1 : 0);
1784 /* is vnode_t a regular file */
1786 vnode_isreg(vnode_t vp
)
1788 return ((vp
->v_type
== VREG
)? 1 : 0);
1791 /* is vnode_t a directory? */
1793 vnode_isdir(vnode_t vp
)
1795 return ((vp
->v_type
== VDIR
)? 1 : 0);
1798 /* is vnode_t a symbolic link ? */
1800 vnode_islnk(vnode_t vp
)
1802 return ((vp
->v_type
== VLNK
)? 1 : 0);
1806 vnode_lookup_continue_needed(vnode_t vp
, struct componentname
*cnp
)
1808 struct nameidata
*ndp
= cnp
->cn_ndp
;
1811 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n");
1814 if (vnode_isdir(vp
)) {
1815 if (vp
->v_mountedhere
!= NULL
) {
1820 if (vp
->v_resolve
) {
1823 #endif /* CONFIG_TRIGGERS */
1828 if (vnode_islnk(vp
)) {
1829 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
1830 if (cnp
->cn_flags
& FOLLOW
) {
1833 if (ndp
->ni_flag
& NAMEI_TRAILINGSLASH
) {
1841 ndp
->ni_flag
|= NAMEI_CONTLOOKUP
;
1842 return EKEEPLOOKING
;
1845 /* is vnode_t a fifo ? */
1847 vnode_isfifo(vnode_t vp
)
1849 return ((vp
->v_type
== VFIFO
)? 1 : 0);
1852 /* is vnode_t a block device? */
1854 vnode_isblk(vnode_t vp
)
1856 return ((vp
->v_type
== VBLK
)? 1 : 0);
1860 vnode_isspec(vnode_t vp
)
1862 return (((vp
->v_type
== VCHR
) || (vp
->v_type
== VBLK
)) ? 1 : 0);
1865 /* is vnode_t a char device? */
1867 vnode_ischr(vnode_t vp
)
1869 return ((vp
->v_type
== VCHR
)? 1 : 0);
1872 /* is vnode_t a socket? */
1874 vnode_issock(vnode_t vp
)
1876 return ((vp
->v_type
== VSOCK
)? 1 : 0);
1879 /* is vnode_t a device with multiple active vnodes referring to it? */
1881 vnode_isaliased(vnode_t vp
)
1883 enum vtype vt
= vp
->v_type
;
1884 if (!((vt
== VCHR
) || (vt
== VBLK
))) {
1887 return (vp
->v_specflags
& SI_ALIASED
);
1891 /* is vnode_t a named stream? */
1893 vnode_isnamedstream(
1902 return ((vp
->v_flag
& VISNAMEDSTREAM
) ? 1 : 0);
1918 return ((vp
->v_flag
& VISSHADOW
) ? 1 : 0);
1924 /* does vnode have associated named stream vnodes ? */
1926 vnode_hasnamedstreams(
1935 return ((vp
->v_lflag
& VL_HASSTREAMS
) ? 1 : 0);
1940 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
1942 vnode_setnocache(vnode_t vp
)
1944 vnode_lock_spin(vp
);
1945 vp
->v_flag
|= VNOCACHE_DATA
;
1950 vnode_clearnocache(vnode_t vp
)
1952 vnode_lock_spin(vp
);
1953 vp
->v_flag
&= ~VNOCACHE_DATA
;
1958 vnode_set_openevt(vnode_t vp
)
1960 vnode_lock_spin(vp
);
1961 vp
->v_flag
|= VOPENEVT
;
1966 vnode_clear_openevt(vnode_t vp
)
1968 vnode_lock_spin(vp
);
1969 vp
->v_flag
&= ~VOPENEVT
;
1975 vnode_setnoreadahead(vnode_t vp
)
1977 vnode_lock_spin(vp
);
1978 vp
->v_flag
|= VRAOFF
;
1983 vnode_clearnoreadahead(vnode_t vp
)
1985 vnode_lock_spin(vp
);
1986 vp
->v_flag
&= ~VRAOFF
;
1991 /* mark vnode_t to skip vflush() is SKIPSYSTEM */
1993 vnode_setnoflush(vnode_t vp
)
1995 vnode_lock_spin(vp
);
1996 vp
->v_flag
|= VNOFLUSH
;
2001 vnode_clearnoflush(vnode_t vp
)
2003 vnode_lock_spin(vp
);
2004 vp
->v_flag
&= ~VNOFLUSH
;
2009 /* is vnode_t a blkdevice and has a FS mounted on it */
2011 vnode_ismountedon(vnode_t vp
)
2013 return ((vp
->v_specflags
& SI_MOUNTEDON
)? 1 : 0);
2017 vnode_setmountedon(vnode_t vp
)
2019 vnode_lock_spin(vp
);
2020 vp
->v_specflags
|= SI_MOUNTEDON
;
2025 vnode_clearmountedon(vnode_t vp
)
2027 vnode_lock_spin(vp
);
2028 vp
->v_specflags
&= ~SI_MOUNTEDON
;
2034 vnode_settag(vnode_t vp
, int tag
)
2041 vnode_tag(vnode_t vp
)
2047 vnode_parent(vnode_t vp
)
2050 return(vp
->v_parent
);
2054 vnode_setparent(vnode_t vp
, vnode_t dvp
)
2060 vnode_name(vnode_t vp
)
2062 /* we try to keep v_name a reasonable name for the node */
2067 vnode_setname(vnode_t vp
, char * name
)
2072 /* return the registered FS name when adding the FS to kernel */
2074 vnode_vfsname(vnode_t vp
, char * buf
)
2076 strncpy(buf
, vp
->v_mount
->mnt_vtable
->vfc_name
, MFSNAMELEN
);
2079 /* return the FS type number */
2081 vnode_vfstypenum(vnode_t vp
)
2083 return(vp
->v_mount
->mnt_vtable
->vfc_typenum
);
2087 vnode_vfs64bitready(vnode_t vp
)
2091 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2093 if ((vp
->v_mount
!= dead_mountp
) && (vp
->v_mount
->mnt_vtable
->vfc_vfsflags
& VFC_VFS64BITREADY
))
2101 /* return the visible flags on associated mount point of vnode_t */
2103 vnode_vfsvisflags(vnode_t vp
)
2105 return(vp
->v_mount
->mnt_flag
& MNT_VISFLAGMASK
);
2108 /* return the command modifier flags on associated mount point of vnode_t */
2110 vnode_vfscmdflags(vnode_t vp
)
2112 return(vp
->v_mount
->mnt_flag
& MNT_CMDFLAGS
);
2115 /* return the max symlink of short links of vnode_t */
2117 vnode_vfsmaxsymlen(vnode_t vp
)
2119 return(vp
->v_mount
->mnt_maxsymlinklen
);
2122 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2124 vnode_vfsstatfs(vnode_t vp
)
2126 return(&vp
->v_mount
->mnt_vfsstat
);
2129 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2131 vnode_vfsfsprivate(vnode_t vp
)
2133 return(vp
->v_mount
->mnt_data
);
2136 /* is vnode_t in a rdonly mounted FS */
2138 vnode_vfsisrdonly(vnode_t vp
)
2140 return ((vp
->v_mount
->mnt_flag
& MNT_RDONLY
)? 1 : 0);
2144 vnode_compound_rename_available(vnode_t vp
)
2146 return vnode_compound_op_available(vp
, COMPOUND_VNOP_RENAME
);
2149 vnode_compound_rmdir_available(vnode_t vp
)
2151 return vnode_compound_op_available(vp
, COMPOUND_VNOP_RMDIR
);
2154 vnode_compound_mkdir_available(vnode_t vp
)
2156 return vnode_compound_op_available(vp
, COMPOUND_VNOP_MKDIR
);
2159 vnode_compound_remove_available(vnode_t vp
)
2161 return vnode_compound_op_available(vp
, COMPOUND_VNOP_REMOVE
);
2164 vnode_compound_open_available(vnode_t vp
)
2166 return vnode_compound_op_available(vp
, COMPOUND_VNOP_OPEN
);
2170 vnode_compound_op_available(vnode_t vp
, compound_vnop_id_t opid
)
2172 return ((vp
->v_mount
->mnt_compound_ops
& opid
) != 0);
2176 * Returns vnode ref to current working directory; if a per-thread current
2177 * working directory is in effect, return that instead of the per process one.
2179 * XXX Published, but not used.
2182 current_workingdir(void)
2184 return vfs_context_cwd(vfs_context_current());
2187 /* returns vnode ref to current root(chroot) directory */
2189 current_rootdir(void)
2191 proc_t proc
= current_proc();
2194 if ( (vp
= proc
->p_fd
->fd_rdir
) ) {
2195 if ( (vnode_getwithref(vp
)) )
2202 * Get a filesec and optional acl contents from an extended attribute.
2203 * Function will attempt to retrive ACL, UUID, and GUID information using a
2204 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2206 * Parameters: vp The vnode on which to operate.
2207 * fsecp The filesec (and ACL, if any) being
2209 * ctx The vnode context in which the
2210 * operation is to be attempted.
2212 * Returns: 0 Success
2215 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2216 * host byte order, as will be the ACL contents, if any.
2217 * Internally, we will cannonize these values from network (PPC)
2218 * byte order after we retrieve them so that the on-disk contents
2219 * of the extended attribute are identical for both PPC and Intel
2220 * (if we were not being required to provide this service via
2221 * fallback, this would be the job of the filesystem
2222 * 'VNOP_GETATTR' call).
2224 * We use ntohl() because it has a transitive property on Intel
2225 * machines and no effect on PPC mancines. This guarantees us
2227 * XXX: Deleting rather than ignoreing a corrupt security structure is
2228 * probably the only way to reset it without assistance from an
2229 * file system integrity checking tool. Right now we ignore it.
2231 * XXX: We should enummerate the possible errno values here, and where
2232 * in the code they originated.
2235 vnode_get_filesec(vnode_t vp
, kauth_filesec_t
*fsecp
, vfs_context_t ctx
)
2237 kauth_filesec_t fsec
;
2240 size_t xsize
, rsize
;
2242 uint32_t host_fsec_magic
;
2243 uint32_t host_acl_entrycount
;
2249 /* find out how big the EA is */
2250 if (vn_getxattr(vp
, KAUTH_FILESEC_XATTR
, NULL
, &xsize
, XATTR_NOSECURITY
, ctx
) != 0) {
2251 /* no EA, no filesec */
2252 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
2254 /* either way, we are done */
2259 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2260 * ACE entrly ACL, and if it's larger than that, it must have the right
2261 * number of bytes such that it contains an atomic number of ACEs,
2262 * rather than partial entries. Otherwise, we ignore it.
2264 if (!KAUTH_FILESEC_VALID(xsize
)) {
2265 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize
);
2270 /* how many entries would fit? */
2271 fsec_size
= KAUTH_FILESEC_COUNT(xsize
);
2273 /* get buffer and uio */
2274 if (((fsec
= kauth_filesec_alloc(fsec_size
)) == NULL
) ||
2275 ((fsec_uio
= uio_create(1, 0, UIO_SYSSPACE
, UIO_READ
)) == NULL
) ||
2276 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), xsize
)) {
2277 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2282 /* read security attribute */
2284 if ((error
= vn_getxattr(vp
,
2285 KAUTH_FILESEC_XATTR
,
2291 /* no attribute - no security data */
2292 if ((error
== ENOATTR
) || (error
== ENOENT
) || (error
== EJUSTRETURN
))
2294 /* either way, we are done */
2299 * Validate security structure; the validation must take place in host
2300 * byte order. If it's corrupt, we will just ignore it.
2303 /* Validate the size before trying to convert it */
2304 if (rsize
< KAUTH_FILESEC_SIZE(0)) {
2305 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize
);
2309 /* Validate the magic number before trying to convert it */
2310 host_fsec_magic
= ntohl(KAUTH_FILESEC_MAGIC
);
2311 if (fsec
->fsec_magic
!= host_fsec_magic
) {
2312 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic
);
2316 /* Validate the entry count before trying to convert it. */
2317 host_acl_entrycount
= ntohl(fsec
->fsec_acl
.acl_entrycount
);
2318 if (host_acl_entrycount
!= KAUTH_FILESEC_NOACL
) {
2319 if (host_acl_entrycount
> KAUTH_ACL_MAX_ENTRIES
) {
2320 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount
);
2323 if (KAUTH_FILESEC_SIZE(host_acl_entrycount
) > rsize
) {
2324 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount
, rsize
);
2329 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, NULL
);
2336 kauth_filesec_free(fsec
);
2337 if (fsec_uio
!= NULL
)
2345 * Set a filesec and optional acl contents into an extended attribute.
2346 * function will attempt to store ACL, UUID, and GUID information using a
2347 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2348 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2349 * original caller supplied an acl.
2351 * Parameters: vp The vnode on which to operate.
2352 * fsec The filesec being set.
2353 * acl The acl to be associated with 'fsec'.
2354 * ctx The vnode context in which the
2355 * operation is to be attempted.
2357 * Returns: 0 Success
2360 * Notes: Both the fsec and the acl are always valid.
2362 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2363 * as are the acl contents, if they are used. Internally, we will
2364 * cannonize these values into network (PPC) byte order before we
2365 * attempt to write them so that the on-disk contents of the
2366 * extended attribute are identical for both PPC and Intel (if we
2367 * were not being required to provide this service via fallback,
2368 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2369 * We reverse this process on the way out, so we leave with the
2370 * same byte order we started with.
2372 * XXX: We should enummerate the possible errno values here, and where
2373 * in the code they originated.
2376 vnode_set_filesec(vnode_t vp
, kauth_filesec_t fsec
, kauth_acl_t acl
, vfs_context_t ctx
)
2380 uint32_t saved_acl_copysize
;
2384 if ((fsec_uio
= uio_create(2, 0, UIO_SYSSPACE
, UIO_WRITE
)) == NULL
) {
2385 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2390 * Save the pre-converted ACL copysize, because it gets swapped too
2391 * if we are running with the wrong endianness.
2393 saved_acl_copysize
= KAUTH_ACL_COPYSIZE(acl
);
2395 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK
, fsec
, acl
);
2397 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(fsec
), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL
));
2398 uio_addiov(fsec_uio
, CAST_USER_ADDR_T(acl
), saved_acl_copysize
);
2399 error
= vn_setxattr(vp
,
2400 KAUTH_FILESEC_XATTR
,
2402 XATTR_NOSECURITY
, /* we have auth'ed already */
2404 VFS_DEBUG(ctx
, vp
, "SETATTR - set ACL returning %d", error
);
2406 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST
, fsec
, acl
);
2409 if (fsec_uio
!= NULL
)
2416 * Returns: 0 Success
2417 * ENOMEM Not enough space [only if has filesec]
2419 * vnode_get_filesec: ???
2420 * kauth_cred_guid2uid: ???
2421 * kauth_cred_guid2gid: ???
2422 * vfs_update_vfsstat: ???
2425 vnode_getattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2427 kauth_filesec_t fsec
;
2433 /* don't ask for extended security data if the filesystem doesn't support it */
2434 if (!vfs_extendedsecurity(vnode_mount(vp
))) {
2435 VATTR_CLEAR_ACTIVE(vap
, va_acl
);
2436 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
2437 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
2441 * If the caller wants size values we might have to synthesise, give the
2442 * filesystem the opportunity to supply better intermediate results.
2444 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2445 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2446 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2447 VATTR_SET_ACTIVE(vap
, va_data_size
);
2448 VATTR_SET_ACTIVE(vap
, va_data_alloc
);
2449 VATTR_SET_ACTIVE(vap
, va_total_size
);
2450 VATTR_SET_ACTIVE(vap
, va_total_alloc
);
2453 error
= VNOP_GETATTR(vp
, vap
, ctx
);
2455 KAUTH_DEBUG("ERROR - returning %d", error
);
2460 * If extended security data was requested but not returned, try the fallback
2463 if (VATTR_NOT_RETURNED(vap
, va_acl
) || VATTR_NOT_RETURNED(vap
, va_uuuid
) || VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2466 if ((vp
->v_type
== VDIR
) || (vp
->v_type
== VLNK
) || (vp
->v_type
== VREG
)) {
2467 /* try to get the filesec */
2468 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0)
2471 /* if no filesec, no attributes */
2473 VATTR_RETURN(vap
, va_acl
, NULL
);
2474 VATTR_RETURN(vap
, va_uuuid
, kauth_null_guid
);
2475 VATTR_RETURN(vap
, va_guuid
, kauth_null_guid
);
2478 /* looks good, try to return what we were asked for */
2479 VATTR_RETURN(vap
, va_uuuid
, fsec
->fsec_owner
);
2480 VATTR_RETURN(vap
, va_guuid
, fsec
->fsec_group
);
2482 /* only return the ACL if we were actually asked for it */
2483 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2484 if (fsec
->fsec_acl
.acl_entrycount
== KAUTH_FILESEC_NOACL
) {
2485 VATTR_RETURN(vap
, va_acl
, NULL
);
2487 facl
= kauth_acl_alloc(fsec
->fsec_acl
.acl_entrycount
);
2489 kauth_filesec_free(fsec
);
2493 bcopy(&fsec
->fsec_acl
, facl
, KAUTH_ACL_COPYSIZE(&fsec
->fsec_acl
));
2494 VATTR_RETURN(vap
, va_acl
, facl
);
2497 kauth_filesec_free(fsec
);
2501 * If someone gave us an unsolicited filesec, toss it. We promise that
2502 * we're OK with a filesystem giving us anything back, but our callers
2503 * only expect what they asked for.
2505 if (VATTR_IS_SUPPORTED(vap
, va_acl
) && !VATTR_IS_ACTIVE(vap
, va_acl
)) {
2506 if (vap
->va_acl
!= NULL
)
2507 kauth_acl_free(vap
->va_acl
);
2508 VATTR_CLEAR_SUPPORTED(vap
, va_acl
);
2511 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2513 * Handle the case where we need a UID/GID, but only have extended
2514 * security information.
2516 if (VATTR_NOT_RETURNED(vap
, va_uid
) &&
2517 VATTR_IS_SUPPORTED(vap
, va_uuuid
) &&
2518 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
2519 if ((error
= kauth_cred_guid2uid(&vap
->va_uuuid
, &nuid
)) == 0)
2520 VATTR_RETURN(vap
, va_uid
, nuid
);
2522 if (VATTR_NOT_RETURNED(vap
, va_gid
) &&
2523 VATTR_IS_SUPPORTED(vap
, va_guuid
) &&
2524 !kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
2525 if ((error
= kauth_cred_guid2gid(&vap
->va_guuid
, &ngid
)) == 0)
2526 VATTR_RETURN(vap
, va_gid
, ngid
);
2531 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2533 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
2534 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2536 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2537 nuid
= vp
->v_mount
->mnt_fsowner
;
2538 if (nuid
== KAUTH_UID_NONE
)
2540 } else if (VATTR_IS_SUPPORTED(vap
, va_uid
)) {
2543 /* this will always be something sensible */
2544 nuid
= vp
->v_mount
->mnt_fsowner
;
2546 if ((nuid
== 99) && !vfs_context_issuser(ctx
))
2547 nuid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
2548 VATTR_RETURN(vap
, va_uid
, nuid
);
2550 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
2551 if (vfs_context_issuser(ctx
) && VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2553 } else if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2554 ngid
= vp
->v_mount
->mnt_fsgroup
;
2555 if (ngid
== KAUTH_GID_NONE
)
2557 } else if (VATTR_IS_SUPPORTED(vap
, va_gid
)) {
2560 /* this will always be something sensible */
2561 ngid
= vp
->v_mount
->mnt_fsgroup
;
2563 if ((ngid
== 99) && !vfs_context_issuser(ctx
))
2564 ngid
= kauth_cred_getgid(vfs_context_ucred(ctx
));
2565 VATTR_RETURN(vap
, va_gid
, ngid
);
2569 * Synthesise some values that can be reasonably guessed.
2571 if (!VATTR_IS_SUPPORTED(vap
, va_iosize
))
2572 VATTR_RETURN(vap
, va_iosize
, vp
->v_mount
->mnt_vfsstat
.f_iosize
);
2574 if (!VATTR_IS_SUPPORTED(vap
, va_flags
))
2575 VATTR_RETURN(vap
, va_flags
, 0);
2577 if (!VATTR_IS_SUPPORTED(vap
, va_filerev
))
2578 VATTR_RETURN(vap
, va_filerev
, 0);
2580 if (!VATTR_IS_SUPPORTED(vap
, va_gen
))
2581 VATTR_RETURN(vap
, va_gen
, 0);
2584 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2586 if (!VATTR_IS_SUPPORTED(vap
, va_data_size
))
2587 VATTR_RETURN(vap
, va_data_size
, 0);
2589 /* do we want any of the possibly-computed values? */
2590 if (VATTR_IS_ACTIVE(vap
, va_data_alloc
) ||
2591 VATTR_IS_ACTIVE(vap
, va_total_size
) ||
2592 VATTR_IS_ACTIVE(vap
, va_total_alloc
)) {
2593 /* make sure f_bsize is valid */
2594 if (vp
->v_mount
->mnt_vfsstat
.f_bsize
== 0) {
2595 if ((error
= vfs_update_vfsstat(vp
->v_mount
, ctx
, VFS_KERNEL_EVENT
)) != 0)
2599 /* default va_data_alloc from va_data_size */
2600 if (!VATTR_IS_SUPPORTED(vap
, va_data_alloc
))
2601 VATTR_RETURN(vap
, va_data_alloc
, roundup(vap
->va_data_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2603 /* default va_total_size from va_data_size */
2604 if (!VATTR_IS_SUPPORTED(vap
, va_total_size
))
2605 VATTR_RETURN(vap
, va_total_size
, vap
->va_data_size
);
2607 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2608 if (!VATTR_IS_SUPPORTED(vap
, va_total_alloc
))
2609 VATTR_RETURN(vap
, va_total_alloc
, roundup(vap
->va_total_size
, vp
->v_mount
->mnt_vfsstat
.f_bsize
));
2613 * If we don't have a change time, pull it from the modtime.
2615 if (!VATTR_IS_SUPPORTED(vap
, va_change_time
) && VATTR_IS_SUPPORTED(vap
, va_modify_time
))
2616 VATTR_RETURN(vap
, va_change_time
, vap
->va_modify_time
);
2619 * This is really only supported for the creation VNOPs, but since the field is there
2620 * we should populate it correctly.
2622 VATTR_RETURN(vap
, va_type
, vp
->v_type
);
2625 * The fsid can be obtained from the mountpoint directly.
2627 VATTR_RETURN(vap
, va_fsid
, vp
->v_mount
->mnt_vfsstat
.f_fsid
.val
[0]);
2635 * Set the attributes on a vnode in a vnode context.
2637 * Parameters: vp The vnode whose attributes to set.
2638 * vap A pointer to the attributes to set.
2639 * ctx The vnode context in which the
2640 * operation is to be attempted.
2642 * Returns: 0 Success
2645 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2647 * The contents of the data area pointed to by 'vap' may be
2648 * modified if the vnode is on a filesystem which has been
2649 * mounted with ingore ownership flags, or by the underlyng
2650 * VFS itself, or by the fallback code, if the underlying VFS
2651 * does not support ACL, UUID, or GUUID attributes directly.
2653 * XXX: We should enummerate the possible errno values here, and where
2654 * in the code they originated.
2657 vnode_setattr(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2659 int error
, is_perm_change
=0;
2662 * Make sure the filesystem is mounted R/W.
2663 * If not, return an error.
2665 if (vfs_isrdonly(vp
->v_mount
)) {
2670 /* For streams, va_data_size is the only setable attribute. */
2671 if ((vp
->v_flag
& VISNAMEDSTREAM
) && (vap
->va_active
!= VNODE_ATTR_va_data_size
)) {
2678 * If ownership is being ignored on this volume, we silently discard
2679 * ownership changes.
2681 if (vp
->v_mount
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) {
2682 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
2683 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
2686 if ( VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
)
2687 || VATTR_IS_ACTIVE(vap
, va_mode
) || VATTR_IS_ACTIVE(vap
, va_acl
)) {
2692 * Make sure that extended security is enabled if we're going to try
2695 if (!vfs_extendedsecurity(vnode_mount(vp
)) &&
2696 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
2697 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
2702 error
= VNOP_SETATTR(vp
, vap
, ctx
);
2704 if ((error
== 0) && !VATTR_ALL_SUPPORTED(vap
))
2705 error
= vnode_setattr_fallback(vp
, vap
, ctx
);
2708 // only send a stat_changed event if this is more than
2709 // just an access or backup time update
2710 if (error
== 0 && (vap
->va_active
!= VNODE_ATTR_BIT(va_access_time
)) && (vap
->va_active
!= VNODE_ATTR_BIT(va_backup_time
))) {
2711 if (is_perm_change
) {
2712 if (need_fsevent(FSE_CHOWN
, vp
)) {
2713 add_fsevent(FSE_CHOWN
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
2715 } else if(need_fsevent(FSE_STAT_CHANGED
, vp
)) {
2716 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
/*
 * NOTE(review): this region is a garbled extraction -- each logical
 * statement is split across several physical lines, the original file's
 * line numbers are fused into the text, and some source lines (braces,
 * returns, error paths) are missing entirely.  Code text left untouched;
 * only comments added.
 *
 * vnode_setattr_fallback: stores ACL / owner-UUID / group-UUID attributes
 * that the filesystem did not handle itself, by read-modify-writing a
 * kauth_filesec kept in the KAUTH_FILESEC_XATTR extended attribute.
 */
2726 * Fallback for setting the attributes on a vnode in a vnode context. This
2727 * Function will attempt to store ACL, UUID, and GUID information utilizing
2728 * a read/modify/write operation against an EA used as a backing store for
2731 * Parameters: vp The vnode whose attributes to set.
2732 * vap A pointer to the attributes to set.
2733 * ctx The vnode context in which the
2734 * operation is to be attempted.
2736 * Returns: 0 Success
2739 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
2740 * as are the fsec and lfsec, if they are used.
2742 * The contents of the data area pointed to by 'vap' may be
2743 * modified to indicate that the attribute is supported for
2744 * any given requested attribute.
2746 * XXX: We should enummerate the possible errno values here, and where
2747 * in the code they originated.
2750 vnode_setattr_fallback(vnode_t vp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
2752 kauth_filesec_t fsec
;
/* lfsec is the stack-local filesec used when no EA exists yet */
2754 struct kauth_filesec lfsec
;
2760 * Extended security fallback via extended attributes.
2762 * Note that we do not free the filesec; the caller is expected to
/* Only fall back for attributes the filesystem declined to handle. */
2765 if (VATTR_NOT_RETURNED(vap
, va_acl
) ||
2766 VATTR_NOT_RETURNED(vap
, va_uuuid
) ||
2767 VATTR_NOT_RETURNED(vap
, va_guuid
)) {
2768 VFS_DEBUG(ctx
, vp
, "SETATTR - doing filesec fallback");
2771 * Fail for file types that we don't permit extended security
/* Extended security is only stored on directories, symlinks, regular files. */
2774 if ((vp
->v_type
!= VDIR
) && (vp
->v_type
!= VLNK
) && (vp
->v_type
!= VREG
)) {
2775 VFS_DEBUG(ctx
, vp
, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp
));
2781 * If we don't have all the extended security items, we need
2782 * to fetch the existing data to perform a read-modify-write
2786 if (!VATTR_IS_ACTIVE(vap
, va_acl
) ||
2787 !VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
2788 !VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2789 if ((error
= vnode_get_filesec(vp
, &fsec
, ctx
)) != 0) {
2790 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error
);
2794 /* if we didn't get a filesec, use our local one */
2796 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
2799 KAUTH_DEBUG("SETATTR - updating existing filesec");
2802 facl
= &fsec
->fsec_acl
;
2804 /* if we're using the local filesec, we need to initialise it */
2805 if (fsec
== &lfsec
) {
2806 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
2807 fsec
->fsec_owner
= kauth_null_guid
;
2808 fsec
->fsec_group
= kauth_null_guid
;
2809 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2810 facl
->acl_flags
= 0;
2814 * Update with the supplied attributes.
2816 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
2817 KAUTH_DEBUG("SETATTR - updating owner UUID");
2818 fsec
->fsec_owner
= vap
->va_uuuid
;
2819 VATTR_SET_SUPPORTED(vap
, va_uuuid
);
2821 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
2822 KAUTH_DEBUG("SETATTR - updating group UUID");
2823 fsec
->fsec_group
= vap
->va_guuid
;
2824 VATTR_SET_SUPPORTED(vap
, va_guuid
);
2826 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
2827 if (vap
->va_acl
== NULL
) {
2828 KAUTH_DEBUG("SETATTR - removing ACL");
2829 facl
->acl_entrycount
= KAUTH_FILESEC_NOACL
;
2831 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap
->va_acl
->acl_entrycount
);
2834 VATTR_SET_SUPPORTED(vap
, va_acl
);
2838 * If the filesec data is all invalid, we can just remove
2839 * the EA completely.
2841 if ((facl
->acl_entrycount
== KAUTH_FILESEC_NOACL
) &&
2842 kauth_guid_equal(&fsec
->fsec_owner
, &kauth_null_guid
) &&
2843 kauth_guid_equal(&fsec
->fsec_group
, &kauth_null_guid
)) {
2844 error
= vn_removexattr(vp
, KAUTH_FILESEC_XATTR
, XATTR_NOSECURITY
, ctx
);
2845 /* no attribute is ok, nothing to delete */
2846 if (error
== ENOATTR
)
2848 VFS_DEBUG(ctx
, vp
, "SETATTR - remove filesec returning %d", error
);
2851 error
= vnode_set_filesec(vp
, fsec
, facl
, ctx
);
2852 VFS_DEBUG(ctx
, vp
, "SETATTR - update filesec returning %d", error
);
2855 /* if we fetched a filesec, dispose of the buffer */
2857 kauth_filesec_free(fsec
);
/*
 * NOTE(review): garbled extraction -- statements split across lines and
 * some original lines missing; code text left untouched.
 *
 * vnode_notify: upcall for a filesystem to report an EVFILT_VNODE-type
 * event.  Maps VNODE_EVENT_* bits to the corresponding NOTE_* knote bits,
 * posts them via lock_vnode_and_post(), and forwards the raw events to
 * the fsevents subsystem.
 */
2865 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
2869 vnode_notify(vnode_t vp
, uint32_t events
, struct vnode_attr
*vap
)
2871 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
2872 uint32_t knote_mask
= (VNODE_EVENT_WRITE
| VNODE_EVENT_DELETE
| VNODE_EVENT_RENAME
2873 | VNODE_EVENT_LINK
| VNODE_EVENT_EXTEND
| VNODE_EVENT_ATTRIB
);
2874 uint32_t dir_contents_mask
= (VNODE_EVENT_DIR_CREATED
| VNODE_EVENT_FILE_CREATED
2875 | VNODE_EVENT_DIR_REMOVED
| VNODE_EVENT_FILE_REMOVED
);
2876 uint32_t knote_events
= (events
& knote_mask
);
2878 /* Permissions are not explicitly part of the kqueue model */
2879 if (events
& VNODE_EVENT_PERMS
) {
2880 knote_events
|= NOTE_ATTRIB
;
2883 /* Directory contents information just becomes NOTE_WRITE */
2884 if ((vnode_isdir(vp
)) && (events
& dir_contents_mask
)) {
2885 knote_events
|= NOTE_WRITE
;
2889 lock_vnode_and_post(vp
, knote_events
);
2892 create_fsevent_from_kevent(vp
, events
, vap
);
2905 vnode_isdyldsharedcache(vnode_t vp
)
2907 return ((vp
->v_flag
& VSHARED_DYLD
) ? 1 : 0);
2912 * For a filesystem that isn't tracking its own vnode watchers:
2913 * check whether a vnode is being monitored.
2916 vnode_ismonitored(vnode_t vp
) {
2917 return (vp
->v_knotes
.slh_first
!= NULL
);
/*
 * NOTE(review): garbled extraction -- the function's return type, braces
 * and at least one statement (original lines 2926-2929) are missing, so
 * the fragment is left byte-identical rather than reconstructed.
 *
 * Activates in 'vap' the attribute set (VNODE_NOTIFY_ATTRS) that
 * vnode_notify() expects to receive.
 */
2921 * Initialize a struct vnode_attr and activate the attributes required
2922 * by the vnode_notify() call.
2925 vfs_get_notify_attributes(struct vnode_attr
*vap
)
2928 vap
->va_active
= VNODE_NOTIFY_ATTRS
;
/*
 * NOTE(review): garbled extraction -- error-path lines and returns are
 * missing; code text left untouched.
 *
 * vfs_settriggercallback: installs a trigger-vnode callback on the mount
 * identified by 'fsid'.  Looks the mount up with a reference, busies it
 * non-blocking, refuses to overwrite an existing callback, then stores
 * the callback/data and immediately invokes it once with VTC_REPLACE.
 */
2934 vfs_settriggercallback(fsid_t
*fsid
, vfs_trigger_callback_t vtc
, void *data
, uint32_t flags __unused
, vfs_context_t ctx
)
2939 mp
= mount_list_lookupby_fsid(fsid
, 0 /* locked */, 1 /* withref */);
/* LK_NOWAIT: fail rather than sleep if the mount is already busy */
2944 error
= vfs_busy(mp
, LK_NOWAIT
);
2952 if (mp
->mnt_triggercallback
!= NULL
) {
2958 mp
->mnt_triggercallback
= vtc
;
2959 mp
->mnt_triggerdata
= data
;
2962 mp
->mnt_triggercallback(mp
, VTC_REPLACE
, data
, ctx
);
2968 #endif /* CONFIG_TRIGGERS */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * several braces are missing; code text left untouched.
 *
 * VNOP_LOOKUP: dispatches the lookup operation to the filesystem via
 * dvp->v_op.  Under CONFIG_VFS_FUNNEL, takes/releases the fsnode lock
 * for non-thread-safe filesystems; on the last component with LOCKPARENT
 * it deliberately leaves the fsnode lock held (marked FSNODELOCKHELD)
 * for the remainder of the syscall.
 */
2971 * Definition of vnode operations.
2977 *#% lookup dvp L ? ?
2978 *#% lookup vpp - L -
2980 struct vnop_lookup_args
{
2981 struct vnodeop_desc
*a_desc
;
2984 struct componentname
*a_cnp
;
2985 vfs_context_t a_context
;
2990 * Returns: 0 Success
2991 * lock_fsnode:ENOENT No such file or directory [only for VFS
2992 * that is not thread safe & vnode is
2993 * currently being/has been terminated]
2994 * <vfs_lookup>:ENAMETOOLONG
2995 * <vfs_lookup>:ENOENT
2996 * <vfs_lookup>:EJUSTRETURN
2997 * <vfs_lookup>:EPERM
2998 * <vfs_lookup>:EISDIR
2999 * <vfs_lookup>:ENOTDIR
3002 * Note: The return codes from the underlying VFS's lookup routine can't
3003 * be fully enumerated here, since third party VFS authors may not
3004 * limit their error returns to the ones documented here, even
3005 * though this may result in some programs functioning incorrectly.
3007 * The return codes documented above are those which may currently
3008 * be returned by HFS from hfs_lookup, not including additional
3009 * error code which may be propagated from underlying routines.
3012 VNOP_LOOKUP(vnode_t dvp
, vnode_t
*vpp
, struct componentname
*cnp
, vfs_context_t ctx
)
3015 struct vnop_lookup_args a
;
3017 #if CONFIG_VFS_FUNNEL
3019 int funnel_state
= 0;
3020 #endif /* CONFIG_VFS_FUNNEL */
3022 a
.a_desc
= &vnop_lookup_desc
;
3028 #if CONFIG_VFS_FUNNEL
3029 thread_safe
= THREAD_SAFE_FS(dvp
);
3031 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3035 #endif /* CONFIG_VFS_FUNNEL */
/* Indirect call through the filesystem's vnode-operations vector. */
3037 _err
= (*dvp
->v_op
[vnop_lookup_desc
.vdesc_offset
])(&a
);
3041 #if CONFIG_VFS_FUNNEL
3043 if ( (cnp
->cn_flags
& ISLASTCN
) ) {
3044 if ( (cnp
->cn_flags
& LOCKPARENT
) ) {
3045 if ( !(cnp
->cn_flags
& FSNODELOCKHELD
) ) {
3047 * leave the fsnode lock held on
3048 * the directory, but restore the funnel...
3049 * also indicate that we need to drop the
3050 * fsnode_lock when we're done with the
3051 * system call processing for this path
3053 cnp
->cn_flags
|= FSNODELOCKHELD
;
3055 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3060 unlock_fsnode(dvp
, &funnel_state
);
3062 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- several argument assignments,
 * returns and braces are missing; code text left untouched.
 *
 * VNOP_COMPOUND_OPEN: dispatches a combined lookup+open (optionally
 * +create, per VNOP_COMPOUND_OPEN_DO_CREATE) to the filesystem.  Wires
 * up the create/open authorizer callbacks, sanity-checks the vap/create
 * pairing with panics, removes a stale AppleDouble file on create for
 * non-native-xattr filesystems, posts NOTE_WRITE on the directory, and
 * runs the compound-lookup post hook.
 */
3068 struct vnop_compound_open_args
{
3069 struct vnodeop_desc
*a_desc
;
3072 struct componentname
*a_cnp
;
3075 struct vnode_attr
*a_vap
;
3076 vfs_context_t a_context
;
3082 VNOP_COMPOUND_OPEN(vnode_t dvp
, vnode_t
*vpp
, struct nameidata
*ndp
, int32_t flags
, int32_t fmode
, uint32_t *statusp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
3085 struct vnop_compound_open_args a
;
3088 uint32_t tmp_status
= 0;
3089 struct componentname
*cnp
= &ndp
->ni_cnd
;
3091 want_create
= (flags
& VNOP_COMPOUND_OPEN_DO_CREATE
);
3093 a
.a_desc
= &vnop_compound_open_desc
;
3095 a
.a_vpp
= vpp
; /* Could be NULL */
/* Use caller's status word if provided, else the local scratch one. */
3099 a
.a_status
= (statusp
!= NULL
) ? statusp
: &tmp_status
;
3102 a
.a_open_create_authorizer
= vn_authorize_create
;
3103 a
.a_open_existing_authorizer
= vn_authorize_open_existing
;
3104 a
.a_reserved
= NULL
;
3106 if (dvp
== NULLVP
) {
3109 if (want_create
&& !vap
) {
3110 panic("Want create, but no vap?");
3112 if (!want_create
&& vap
) {
3113 panic("Don't want create, but have a vap?");
3116 _err
= (*dvp
->v_op
[vnop_compound_open_desc
.vdesc_offset
])(&a
);
3118 did_create
= (*a
.a_status
& COMPOUND_OPEN_STATUS_DID_CREATE
);
3120 if (did_create
&& !want_create
) {
3121 panic("Filesystem did a create, even though none was requested?");
3125 if (!NATIVE_XATTR(dvp
)) {
3127 * Remove stale Apple Double file (if any).
3129 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 0);
3132 /* On create, provide kqueue notification */
3133 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
3136 lookup_compound_vnop_post_hook(_err
, dvp
, *vpp
, ndp
, did_create
);
3137 #if 0 /* FSEvents... */
3138 if (*vpp
&& _err
&& _err
!= EKEEPLOOKING
) {
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_CREATE: dispatches file creation to the filesystem.  Funnel
 * locking for non-thread-safe filesystems under CONFIG_VFS_FUNNEL; on
 * success removes a stale AppleDouble file for non-native-xattr
 * filesystems and posts NOTE_WRITE on the directory.
 */
3149 struct vnop_create_args
{
3150 struct vnodeop_desc
*a_desc
;
3153 struct componentname
*a_cnp
;
3154 struct vnode_attr
*a_vap
;
3155 vfs_context_t a_context
;
3159 VNOP_CREATE(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
3162 struct vnop_create_args a
;
3163 #if CONFIG_VFS_FUNNEL
3165 int funnel_state
= 0;
3166 #endif /* CONFIG_VFS_FUNNEL */
3168 a
.a_desc
= &vnop_create_desc
;
3175 #if CONFIG_VFS_FUNNEL
3176 thread_safe
= THREAD_SAFE_FS(dvp
);
3178 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3182 #endif /* CONFIG_VFS_FUNNEL */
3184 _err
= (*dvp
->v_op
[vnop_create_desc
.vdesc_offset
])(&a
);
3185 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
3187 * Remove stale Apple Double file (if any).
3189 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 0);
3192 #if CONFIG_VFS_FUNNEL
3194 unlock_fsnode(dvp
, &funnel_state
);
3196 #endif /* CONFIG_VFS_FUNNEL */
3198 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_WHITEOUT: dispatches a whiteout operation (union-mount name
 * masking) to the filesystem, with funnel locking for non-thread-safe
 * filesystems, then posts NOTE_WRITE on the directory.
 */
3206 *#% whiteout dvp L L L
3207 *#% whiteout cnp - - -
3208 *#% whiteout flag - - -
3211 struct vnop_whiteout_args
{
3212 struct vnodeop_desc
*a_desc
;
3214 struct componentname
*a_cnp
;
3216 vfs_context_t a_context
;
3220 VNOP_WHITEOUT(vnode_t dvp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
3223 struct vnop_whiteout_args a
;
3224 #if CONFIG_VFS_FUNNEL
3226 int funnel_state
= 0;
3227 #endif /* CONFIG_VFS_FUNNEL */
3229 a
.a_desc
= &vnop_whiteout_desc
;
3235 #if CONFIG_VFS_FUNNEL
3236 thread_safe
= THREAD_SAFE_FS(dvp
);
3238 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3242 #endif /* CONFIG_VFS_FUNNEL */
3244 _err
= (*dvp
->v_op
[vnop_whiteout_desc
.vdesc_offset
])(&a
);
3246 #if CONFIG_VFS_FUNNEL
3248 unlock_fsnode(dvp
, &funnel_state
);
3250 #endif /* CONFIG_VFS_FUNNEL */
3252 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_MKNOD: dispatches special-file creation to the filesystem, with
 * funnel locking for non-thread-safe filesystems, then posts NOTE_WRITE
 * on the directory.
 */
3264 struct vnop_mknod_args
{
3265 struct vnodeop_desc
*a_desc
;
3268 struct componentname
*a_cnp
;
3269 struct vnode_attr
*a_vap
;
3270 vfs_context_t a_context
;
3274 VNOP_MKNOD(vnode_t dvp
, vnode_t
* vpp
, struct componentname
* cnp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
3278 struct vnop_mknod_args a
;
3279 #if CONFIG_VFS_FUNNEL
3281 int funnel_state
= 0;
3282 #endif /* CONFIG_VFS_FUNNEL */
3284 a
.a_desc
= &vnop_mknod_desc
;
3291 #if CONFIG_VFS_FUNNEL
3292 thread_safe
= THREAD_SAFE_FS(dvp
);
3294 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
3298 #endif /* CONFIG_VFS_FUNNEL */
3300 _err
= (*dvp
->v_op
[vnop_mknod_desc
.vdesc_offset
])(&a
);
3302 #if CONFIG_VFS_FUNNEL
3304 unlock_fsnode(dvp
, &funnel_state
);
3306 #endif /* CONFIG_VFS_FUNNEL */
3308 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_OPEN: dispatches an open to the filesystem.  Defaults ctx to the
 * current context.  Under CONFIG_VFS_FUNNEL it takes the funnel, and
 * skips the fsnode lock for VCHR/VFIFO/VSOCK vnodes (device/fifo/socket
 * opens are handled outside the per-fsnode lock).
 */
3319 struct vnop_open_args
{
3320 struct vnodeop_desc
*a_desc
;
3323 vfs_context_t a_context
;
3327 VNOP_OPEN(vnode_t vp
, int mode
, vfs_context_t ctx
)
3330 struct vnop_open_args a
;
3331 #if CONFIG_VFS_FUNNEL
3333 int funnel_state
= 0;
3334 #endif /* CONFIG_VFS_FUNNEL */
3337 ctx
= vfs_context_current();
3339 a
.a_desc
= &vnop_open_desc
;
3344 #if CONFIG_VFS_FUNNEL
3345 thread_safe
= THREAD_SAFE_FS(vp
);
3347 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3348 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3349 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3350 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3355 #endif /* CONFIG_VFS_FUNNEL */
3357 _err
= (*vp
->v_op
[vnop_open_desc
.vdesc_offset
])(&a
);
3359 #if CONFIG_VFS_FUNNEL
3361 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3362 unlock_fsnode(vp
, NULL
);
3364 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3366 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_CLOSE: dispatches a close to the filesystem, mirroring
 * VNOP_OPEN's locking: funnel taken under CONFIG_VFS_FUNNEL, fsnode
 * lock skipped for VCHR/VFIFO/VSOCK vnodes.
 */
3377 struct vnop_close_args
{
3378 struct vnodeop_desc
*a_desc
;
3381 vfs_context_t a_context
;
3385 VNOP_CLOSE(vnode_t vp
, int fflag
, vfs_context_t ctx
)
3388 struct vnop_close_args a
;
3389 #if CONFIG_VFS_FUNNEL
3391 int funnel_state
= 0;
3392 #endif /* CONFIG_VFS_FUNNEL */
3395 ctx
= vfs_context_current();
3397 a
.a_desc
= &vnop_close_desc
;
3402 #if CONFIG_VFS_FUNNEL
3403 thread_safe
= THREAD_SAFE_FS(vp
);
3405 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3406 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3407 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3408 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3413 #endif /* CONFIG_VFS_FUNNEL */
3415 _err
= (*vp
->v_op
[vnop_close_desc
.vdesc_offset
])(&a
);
3417 #if CONFIG_VFS_FUNNEL
3419 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3420 unlock_fsnode(vp
, NULL
);
3422 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3424 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_ACCESS: dispatches an access-check to the filesystem, with
 * fsnode locking for non-thread-safe filesystems under
 * CONFIG_VFS_FUNNEL.
 */
3435 struct vnop_access_args
{
3436 struct vnodeop_desc
*a_desc
;
3439 vfs_context_t a_context
;
3443 VNOP_ACCESS(vnode_t vp
, int action
, vfs_context_t ctx
)
3446 struct vnop_access_args a
;
3447 #if CONFIG_VFS_FUNNEL
3449 int funnel_state
= 0;
3450 #endif /* CONFIG_VFS_FUNNEL */
3453 ctx
= vfs_context_current();
3455 a
.a_desc
= &vnop_access_desc
;
3457 a
.a_action
= action
;
3460 #if CONFIG_VFS_FUNNEL
3461 thread_safe
= THREAD_SAFE_FS(vp
);
3463 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3467 #endif /* CONFIG_VFS_FUNNEL */
3469 _err
= (*vp
->v_op
[vnop_access_desc
.vdesc_offset
])(&a
);
3471 #if CONFIG_VFS_FUNNEL
3473 unlock_fsnode(vp
, &funnel_state
);
3475 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_GETATTR: dispatches an attribute read to the filesystem, with
 * fsnode locking for non-thread-safe filesystems under
 * CONFIG_VFS_FUNNEL.
 */
3483 *#% getattr vp = = =
3486 struct vnop_getattr_args
{
3487 struct vnodeop_desc
*a_desc
;
3489 struct vnode_attr
*a_vap
;
3490 vfs_context_t a_context
;
3494 VNOP_GETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
3497 struct vnop_getattr_args a
;
3498 #if CONFIG_VFS_FUNNEL
3500 int funnel_state
= 0;
3501 #endif /* CONFIG_VFS_FUNNEL */
3503 a
.a_desc
= &vnop_getattr_desc
;
3508 #if CONFIG_VFS_FUNNEL
3509 thread_safe
= THREAD_SAFE_FS(vp
);
3511 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3515 #endif /* CONFIG_VFS_FUNNEL */
3517 _err
= (*vp
->v_op
[vnop_getattr_desc
.vdesc_offset
])(&a
);
3519 #if CONFIG_VFS_FUNNEL
3521 unlock_fsnode(vp
, &funnel_state
);
3523 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns,
 * braces and some condition heads are missing; code text left untouched.
 *
 * VNOP_SETATTR: dispatches an attribute write to the filesystem.  On
 * success for non-native-xattr filesystems it shadows uid/gid/mode
 * changes into the AppleDouble file via xattrfile_setattr().  If any
 * authorization-relevant attribute (mode, uid, gid, flags, ACL, UUIDs)
 * was changed, the vnode's cached authorization rights are invalidated
 * -- including those of an open resource-fork named stream on
 * auth-opaque mounts.  Finally posts NOTE_ATTRIB.
 */
3531 *#% setattr vp L L L
3534 struct vnop_setattr_args
{
3535 struct vnodeop_desc
*a_desc
;
3537 struct vnode_attr
*a_vap
;
3538 vfs_context_t a_context
;
3542 VNOP_SETATTR(vnode_t vp
, struct vnode_attr
* vap
, vfs_context_t ctx
)
3545 struct vnop_setattr_args a
;
3546 #if CONFIG_VFS_FUNNEL
3548 int funnel_state
= 0;
3549 #endif /* CONFIG_VFS_FUNNEL */
3551 a
.a_desc
= &vnop_setattr_desc
;
3556 #if CONFIG_VFS_FUNNEL
3557 thread_safe
= THREAD_SAFE_FS(vp
);
3559 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
3563 #endif /* CONFIG_VFS_FUNNEL */
3565 _err
= (*vp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
3568 * Shadow uid/gid/mod change to extended attribute file.
3570 if (_err
== 0 && !NATIVE_XATTR(vp
)) {
3571 struct vnode_attr va
;
3575 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
3576 VATTR_SET(&va
, va_uid
, vap
->va_uid
);
3579 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
3580 VATTR_SET(&va
, va_gid
, vap
->va_gid
);
3583 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
3584 VATTR_SET(&va
, va_mode
, vap
->va_mode
);
3591 dvp
= vnode_getparent(vp
);
3592 vname
= vnode_getname(vp
);
3594 xattrfile_setattr(dvp
, vname
, &va
, ctx
);
3598 vnode_putname(vname
);
3602 #if CONFIG_VFS_FUNNEL
3604 unlock_fsnode(vp
, &funnel_state
);
3606 #endif /* CONFIG_VFS_FUNNEL */
3609 * If we have changed any of the things about the file that are likely
3610 * to result in changes to authorization results, blow the vnode auth
3614 VATTR_IS_SUPPORTED(vap
, va_mode
) ||
3615 VATTR_IS_SUPPORTED(vap
, va_uid
) ||
3616 VATTR_IS_SUPPORTED(vap
, va_gid
) ||
3617 VATTR_IS_SUPPORTED(vap
, va_flags
) ||
3618 VATTR_IS_SUPPORTED(vap
, va_acl
) ||
3619 VATTR_IS_SUPPORTED(vap
, va_uuuid
) ||
3620 VATTR_IS_SUPPORTED(vap
, va_guuid
))) {
3621 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
3624 if (vfs_authopaque(vp
->v_mount
) && vnode_hasnamedstreams(vp
)) {
3626 if (vnode_getnamedstream(vp
, &svp
, XATTR_RESOURCEFORK_NAME
, NS_OPEN
, 0, ctx
) == 0) {
3627 vnode_uncache_authorized_action(svp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
3631 #endif /* NAMEDSTREAMS */
3635 post_event_if_success(vp
, _err
, NOTE_ATTRIB
);
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_READ: dispatches a read to the filesystem; same funnel/fsnode
 * locking pattern as VNOP_OPEN (fsnode lock skipped for
 * VCHR/VFIFO/VSOCK).
 */
3647 struct vnop_read_args
{
3648 struct vnodeop_desc
*a_desc
;
3652 vfs_context_t a_context
;
3656 VNOP_READ(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3659 struct vnop_read_args a
;
3660 #if CONFIG_VFS_FUNNEL
3662 int funnel_state
= 0;
3663 #endif /* CONFIG_VFS_FUNNEL */
3666 ctx
= vfs_context_current();
3669 a
.a_desc
= &vnop_read_desc
;
3672 a
.a_ioflag
= ioflag
;
3675 #if CONFIG_VFS_FUNNEL
3676 thread_safe
= THREAD_SAFE_FS(vp
);
3678 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3679 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3680 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3681 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3686 #endif /* CONFIG_VFS_FUNNEL */
3688 _err
= (*vp
->v_op
[vnop_read_desc
.vdesc_offset
])(&a
);
3690 #if CONFIG_VFS_FUNNEL
3692 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3693 unlock_fsnode(vp
, NULL
);
3695 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3697 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_WRITE: dispatches a write to the filesystem; same locking
 * pattern as VNOP_READ, plus a NOTE_WRITE knote post on success.
 */
3709 struct vnop_write_args
{
3710 struct vnodeop_desc
*a_desc
;
3714 vfs_context_t a_context
;
3718 VNOP_WRITE(vnode_t vp
, struct uio
* uio
, int ioflag
, vfs_context_t ctx
)
3720 struct vnop_write_args a
;
3722 #if CONFIG_VFS_FUNNEL
3724 int funnel_state
= 0;
3725 #endif /* CONFIG_VFS_FUNNEL */
3728 ctx
= vfs_context_current();
3731 a
.a_desc
= &vnop_write_desc
;
3734 a
.a_ioflag
= ioflag
;
3737 #if CONFIG_VFS_FUNNEL
3738 thread_safe
= THREAD_SAFE_FS(vp
);
3740 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3741 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3742 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3743 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3748 #endif /* CONFIG_VFS_FUNNEL */
3750 _err
= (*vp
->v_op
[vnop_write_desc
.vdesc_offset
])(&a
);
3752 #if CONFIG_VFS_FUNNEL
3754 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3755 unlock_fsnode(vp
, NULL
);
3757 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3759 #endif /* CONFIG_VFS_FUNNEL */
3761 post_event_if_success(vp
, _err
, NOTE_WRITE
);
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_IOCTL: dispatches an ioctl to the filesystem.  For 64-bit
 * callers on non-device vnodes it rejects commands with a data pointer
 * unless the filesystem is 64-bit ready (see the startup/shutdown note
 * below).  Same funnel/fsnode locking pattern as VNOP_OPEN.
 */
3773 struct vnop_ioctl_args
{
3774 struct vnodeop_desc
*a_desc
;
3779 vfs_context_t a_context
;
3783 VNOP_IOCTL(vnode_t vp
, u_long command
, caddr_t data
, int fflag
, vfs_context_t ctx
)
3786 struct vnop_ioctl_args a
;
3787 #if CONFIG_VFS_FUNNEL
3789 int funnel_state
= 0;
3790 #endif /* CONFIG_VFS_FUNNEL */
3793 ctx
= vfs_context_current();
3797 * This check should probably have been put in the TTY code instead...
3799 * We have to be careful about what we assume during startup and shutdown.
3800 * We have to be able to use the root filesystem's device vnode even when
3801 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3802 * structure. If there is no data pointer, it doesn't matter whether
3803 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
3804 * which passes NULL for its data pointer can therefore be used during
3805 * mount or unmount of the root filesystem.
3807 * Depending on what root filesystems need to do during mount/unmount, we
3808 * may need to loosen this check again in the future.
3810 if (vfs_context_is64bit(ctx
) && !(vnode_ischr(vp
) || vnode_isblk(vp
))) {
3811 if (data
!= NULL
&& !vnode_vfs64bitready(vp
)) {
3816 a
.a_desc
= &vnop_ioctl_desc
;
3818 a
.a_command
= command
;
3823 #if CONFIG_VFS_FUNNEL
3824 thread_safe
= THREAD_SAFE_FS(vp
);
3826 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3827 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3828 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3829 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3834 #endif /* CONFIG_VFS_FUNNEL */
3836 _err
= (*vp
->v_op
[vnop_ioctl_desc
.vdesc_offset
])(&a
);
3838 #if CONFIG_VFS_FUNNEL
3840 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3841 unlock_fsnode(vp
, NULL
);
3843 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3845 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_SELECT: dispatches a select/poll query to the filesystem; same
 * funnel/fsnode locking pattern as VNOP_OPEN.
 */
3857 struct vnop_select_args
{
3858 struct vnodeop_desc
*a_desc
;
3863 vfs_context_t a_context
;
3867 VNOP_SELECT(vnode_t vp
, int which
, int fflags
, void * wql
, vfs_context_t ctx
)
3870 struct vnop_select_args a
;
3871 #if CONFIG_VFS_FUNNEL
3873 int funnel_state
= 0;
3874 #endif /* CONFIG_VFS_FUNNEL */
3877 ctx
= vfs_context_current();
3879 a
.a_desc
= &vnop_select_desc
;
3882 a
.a_fflags
= fflags
;
3886 #if CONFIG_VFS_FUNNEL
3887 thread_safe
= THREAD_SAFE_FS(vp
);
3889 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
3890 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3891 if ( (_err
= lock_fsnode(vp
, NULL
)) ) {
3892 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3897 #endif /* CONFIG_VFS_FUNNEL */
3899 _err
= (*vp
->v_op
[vnop_select_desc
.vdesc_offset
])(&a
);
3901 #if CONFIG_VFS_FUNNEL
3903 if (vp
->v_type
!= VCHR
&& vp
->v_type
!= VFIFO
&& vp
->v_type
!= VSOCK
) {
3904 unlock_fsnode(vp
, NULL
);
3906 (void) thread_funnel_set(kernel_flock
, funnel_state
);
3908 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- the lock-ordering assignments
 * (original lines 3951-3958) and returns are missing; code text left
 * untouched.
 *
 * VNOP_EXCHANGE: dispatches an exchangedata-style swap of two files'
 * contents.  Under CONFIG_VFS_FUNNEL the two fsnode locks are taken in
 * vnode-address order to avoid deadlock.  Posts NOTE_ATTRIB (not
 * NOTE_WRITE) on both vnodes, since file descriptors follow the data.
 */
3917 *#% exchange fvp L L L
3918 *#% exchange tvp L L L
3921 struct vnop_exchange_args
{
3922 struct vnodeop_desc
*a_desc
;
3926 vfs_context_t a_context
;
3930 VNOP_EXCHANGE(vnode_t fvp
, vnode_t tvp
, int options
, vfs_context_t ctx
)
3933 struct vnop_exchange_args a
;
3934 #if CONFIG_VFS_FUNNEL
3936 int funnel_state
= 0;
3937 vnode_t lock_first
= NULL
, lock_second
= NULL
;
3938 #endif /* CONFIG_VFS_FUNNEL */
3940 a
.a_desc
= &vnop_exchange_desc
;
3943 a
.a_options
= options
;
3946 #if CONFIG_VFS_FUNNEL
3947 thread_safe
= THREAD_SAFE_FS(fvp
);
3950 * Lock in vnode address order to avoid deadlocks
3959 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) ) {
3962 if ( (_err
= lock_fsnode(lock_second
, NULL
)) ) {
3963 unlock_fsnode(lock_first
, &funnel_state
);
3967 #endif /* CONFIG_VFS_FUNNEL */
3969 _err
= (*fvp
->v_op
[vnop_exchange_desc
.vdesc_offset
])(&a
);
3971 #if CONFIG_VFS_FUNNEL
3973 unlock_fsnode(lock_second
, NULL
);
3974 unlock_fsnode(lock_first
, &funnel_state
);
3976 #endif /* CONFIG_VFS_FUNNEL */
3978 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
3979 post_event_if_success(fvp
, _err
, NOTE_ATTRIB
);
3980 post_event_if_success(tvp
, _err
, NOTE_ATTRIB
);
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_REVOKE: dispatches a revoke to the filesystem.  Only the funnel
 * is taken for non-thread-safe filesystems -- no fsnode lock in the
 * visible code.
 */
3992 struct vnop_revoke_args
{
3993 struct vnodeop_desc
*a_desc
;
3996 vfs_context_t a_context
;
4000 VNOP_REVOKE(vnode_t vp
, int flags
, vfs_context_t ctx
)
4002 struct vnop_revoke_args a
;
4004 #if CONFIG_VFS_FUNNEL
4006 int funnel_state
= 0;
4007 #endif /* CONFIG_VFS_FUNNEL */
4009 a
.a_desc
= &vnop_revoke_desc
;
4014 #if CONFIG_VFS_FUNNEL
4015 thread_safe
= THREAD_SAFE_FS(vp
);
4017 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
4019 #endif /* CONFIG_VFS_FUNNEL */
4021 _err
= (*vp
->v_op
[vnop_revoke_desc
.vdesc_offset
])(&a
);
4023 #if CONFIG_VFS_FUNNEL
4025 (void) thread_funnel_set(kernel_flock
, funnel_state
);
4027 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_MMAP: dispatches an mmap notification to the filesystem, with
 * fsnode locking for non-thread-safe filesystems under
 * CONFIG_VFS_FUNNEL.
 */
4039 struct vnop_mmap_args
{
4040 struct vnodeop_desc
*a_desc
;
4043 vfs_context_t a_context
;
4047 VNOP_MMAP(vnode_t vp
, int fflags
, vfs_context_t ctx
)
4050 struct vnop_mmap_args a
;
4051 #if CONFIG_VFS_FUNNEL
4053 int funnel_state
= 0;
4054 #endif /* CONFIG_VFS_FUNNEL */
4056 a
.a_desc
= &vnop_mmap_desc
;
4058 a
.a_fflags
= fflags
;
4061 #if CONFIG_VFS_FUNNEL
4062 thread_safe
= THREAD_SAFE_FS(vp
);
4064 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4068 #endif /* CONFIG_VFS_FUNNEL */
4070 _err
= (*vp
->v_op
[vnop_mmap_desc
.vdesc_offset
])(&a
);
4072 #if CONFIG_VFS_FUNNEL
4074 unlock_fsnode(vp
, &funnel_state
);
4076 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_MNOMAP: dispatches the "no longer mapped" notification to the
 * filesystem, with fsnode locking for non-thread-safe filesystems under
 * CONFIG_VFS_FUNNEL.
 */
4085 *# mnomap - vp U U U
4088 struct vnop_mnomap_args
{
4089 struct vnodeop_desc
*a_desc
;
4091 vfs_context_t a_context
;
4095 VNOP_MNOMAP(vnode_t vp
, vfs_context_t ctx
)
4098 struct vnop_mnomap_args a
;
4099 #if CONFIG_VFS_FUNNEL
4101 int funnel_state
= 0;
4102 #endif /* CONFIG_VFS_FUNNEL */
4104 a
.a_desc
= &vnop_mnomap_desc
;
4108 #if CONFIG_VFS_FUNNEL
4109 thread_safe
= THREAD_SAFE_FS(vp
);
4111 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4115 #endif /* CONFIG_VFS_FUNNEL */
4117 _err
= (*vp
->v_op
[vnop_mnomap_desc
.vdesc_offset
])(&a
);
4119 #if CONFIG_VFS_FUNNEL
4121 unlock_fsnode(vp
, &funnel_state
);
4123 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_FSYNC: dispatches an fsync (with the caller's waitfor policy) to
 * the filesystem, with fsnode locking for non-thread-safe filesystems
 * under CONFIG_VFS_FUNNEL.
 */
4135 struct vnop_fsync_args
{
4136 struct vnodeop_desc
*a_desc
;
4139 vfs_context_t a_context
;
4143 VNOP_FSYNC(vnode_t vp
, int waitfor
, vfs_context_t ctx
)
4145 struct vnop_fsync_args a
;
4147 #if CONFIG_VFS_FUNNEL
4149 int funnel_state
= 0;
4150 #endif /* CONFIG_VFS_FUNNEL */
4152 a
.a_desc
= &vnop_fsync_desc
;
4154 a
.a_waitfor
= waitfor
;
4157 #if CONFIG_VFS_FUNNEL
4158 thread_safe
= THREAD_SAFE_FS(vp
);
4160 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4164 #endif /* CONFIG_VFS_FUNNEL */
4166 _err
= (*vp
->v_op
[vnop_fsync_desc
.vdesc_offset
])(&a
);
4168 #if CONFIG_VFS_FUNNEL
4170 unlock_fsnode(vp
, &funnel_state
);
4172 #endif /* CONFIG_VFS_FUNNEL */
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_REMOVE: dispatches an unlink to the filesystem.  Afterwards it
 * marks the vnode need-inactive, removes the associated AppleDouble
 * file on non-native-xattr filesystems, and posts NOTE_DELETE|NOTE_LINK
 * on the file plus NOTE_WRITE on the directory.
 *
 * NOTE(review): the visible code computes THREAD_SAFE_FS(dvp) but then
 * lock_fsnode/unlock_fsnode are called on vp, not dvp -- cannot tell
 * from this fragment whether that is intentional; verify against the
 * full source before relying on it.
 */
4181 *#% remove dvp L U U
4185 struct vnop_remove_args
{
4186 struct vnodeop_desc
*a_desc
;
4189 struct componentname
*a_cnp
;
4191 vfs_context_t a_context
;
4195 VNOP_REMOVE(vnode_t dvp
, vnode_t vp
, struct componentname
* cnp
, int flags
, vfs_context_t ctx
)
4198 struct vnop_remove_args a
;
4199 #if CONFIG_VFS_FUNNEL
4201 int funnel_state
= 0;
4202 #endif /* CONFIG_VFS_FUNNEL */
4204 a
.a_desc
= &vnop_remove_desc
;
4211 #if CONFIG_VFS_FUNNEL
4212 thread_safe
= THREAD_SAFE_FS(dvp
);
4214 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4218 #endif /* CONFIG_VFS_FUNNEL */
4220 _err
= (*dvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&a
);
4223 vnode_setneedinactive(vp
);
4225 if ( !(NATIVE_XATTR(dvp
)) ) {
4227 * Remove any associated extended attribute file (._ AppleDouble file).
4229 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 1);
4233 #if CONFIG_VFS_FUNNEL
4235 unlock_fsnode(vp
, &funnel_state
);
4237 #endif /* CONFIG_VFS_FUNNEL */
4239 post_event_if_success(vp
, _err
, NOTE_DELETE
| NOTE_LINK
);
4240 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
/*
 * NOTE(review): garbled extraction -- several argument assignments and
 * the return path are missing; code text left untouched.
 *
 * VNOP_COMPOUND_REMOVE: dispatches a combined lookup+unlink to the
 * filesystem with vn_authorize_unlink as the authorizer, then performs
 * the same post-processing as VNOP_REMOVE (need-inactive, AppleDouble
 * cleanup, knote posts) and runs the compound-lookup post hook.
 */
4246 VNOP_COMPOUND_REMOVE(vnode_t dvp
, vnode_t
*vpp
, struct nameidata
*ndp
, int32_t flags
, struct vnode_attr
*vap
, vfs_context_t ctx
)
4249 struct vnop_compound_remove_args a
;
/* Caller may pass *vpp == NULLVP, letting the FS do the lookup. */
4250 int no_vp
= (*vpp
== NULLVP
);
4252 a
.a_desc
= &vnop_compound_remove_desc
;
4255 a
.a_cnp
= &ndp
->ni_cnd
;
4259 a
.a_remove_authorizer
= vn_authorize_unlink
;
4261 _err
= (*dvp
->v_op
[vnop_compound_remove_desc
.vdesc_offset
])(&a
);
4263 vnode_setneedinactive(*vpp
);
4265 if ( !(NATIVE_XATTR(dvp
)) ) {
4267 * Remove any associated extended attribute file (._ AppleDouble file).
4269 xattrfile_remove(dvp
, ndp
->ni_cnd
.cn_nameptr
, ctx
, 1);
4273 post_event_if_success(*vpp
, _err
, NOTE_DELETE
| NOTE_LINK
);
4274 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
4277 lookup_compound_vnop_post_hook(_err
, dvp
, *vpp
, ndp
, 0);
4278 if (*vpp
&& _err
&& _err
!= EKEEPLOOKING
) {
4284 //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
/*
 * NOTE(review): garbled extraction -- argument assignments, returns and
 * braces missing; code text left untouched.
 *
 * VNOP_LINK: dispatches a hard-link creation to the filesystem.  For
 * non-native-xattr target filesystems it refuses to link a regular file
 * whose name begins with "._" (an AppleDouble sidecar).  Note the
 * dispatch goes through tdvp->v_op (the link target directory's FS).
 * Posts NOTE_LINK on the file and NOTE_WRITE on the directory.
 */
4296 struct vnop_link_args
{
4297 struct vnodeop_desc
*a_desc
;
4300 struct componentname
*a_cnp
;
4301 vfs_context_t a_context
;
4305 VNOP_LINK(vnode_t vp
, vnode_t tdvp
, struct componentname
* cnp
, vfs_context_t ctx
)
4308 struct vnop_link_args a
;
4309 #if CONFIG_VFS_FUNNEL
4311 int funnel_state
= 0;
4312 #endif /* CONFIG_VFS_FUNNEL */
4315 * For file systems with non-native extended attributes,
4316 * disallow linking to an existing "._" Apple Double file.
4318 if ( !NATIVE_XATTR(tdvp
) && (vp
->v_type
== VREG
)) {
4321 vname
= vnode_getname(vp
);
4322 if (vname
!= NULL
) {
4324 if (vname
[0] == '.' && vname
[1] == '_' && vname
[2] != '\0') {
4327 vnode_putname(vname
);
4332 a
.a_desc
= &vnop_link_desc
;
4338 #if CONFIG_VFS_FUNNEL
4339 thread_safe
= THREAD_SAFE_FS(vp
);
4341 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
4345 #endif /* CONFIG_VFS_FUNNEL */
4347 _err
= (*tdvp
->v_op
[vnop_link_desc
.vdesc_offset
])(&a
);
4349 #if CONFIG_VFS_FUNNEL
4351 unlock_fsnode(vp
, &funnel_state
);
4353 #endif /* CONFIG_VFS_FUNNEL */
4355 post_event_if_success(vp
, _err
, NOTE_LINK
);
4356 post_event_if_success(tdvp
, _err
, NOTE_WRITE
);
4362 vn_rename(struct vnode
*fdvp
, struct vnode
**fvpp
, struct componentname
*fcnp
, struct vnode_attr
*fvap
,
4363 struct vnode
*tdvp
, struct vnode
**tvpp
, struct componentname
*tcnp
, struct vnode_attr
*tvap
,
4364 uint32_t flags
, vfs_context_t ctx
)
4367 vnode_t src_attr_vp
= NULLVP
;
4368 vnode_t dst_attr_vp
= NULLVP
;
4369 struct nameidata
*fromnd
= NULL
;
4370 struct nameidata
*tond
= NULL
;
4371 char smallname1
[48];
4372 char smallname2
[48];
4373 char *xfromname
= NULL
;
4374 char *xtoname
= NULL
;
4377 batched
= vnode_compound_rename_available(fdvp
);
4379 #if CONFIG_VFS_FUNNEL
4380 vnode_t fdvp_unsafe
= (THREAD_SAFE_FS(fdvp
) ? NULLVP
: fdvp
);
4381 #endif /* CONFIG_VFS_FUNNEL */
4384 if (*fvpp
== NULLVP
)
4385 panic("Not batched, and no fvp?");
4389 * We need to preflight any potential AppleDouble file for the source file
4390 * before doing the rename operation, since we could potentially be doing
4391 * this operation on a network filesystem, and would end up duplicating
4392 * the work. Also, save the source and destination names. Skip it if the
4393 * source has a "._" prefix.
4396 if (!NATIVE_XATTR(fdvp
) &&
4397 !(fcnp
->cn_nameptr
[0] == '.' && fcnp
->cn_nameptr
[1] == '_')) {
4401 /* Get source attribute file name. */
4402 len
= fcnp
->cn_namelen
+ 3;
4403 if (len
> sizeof(smallname1
)) {
4404 MALLOC(xfromname
, char *, len
, M_TEMP
, M_WAITOK
);
4406 xfromname
= &smallname1
[0];
4408 strlcpy(xfromname
, "._", min(sizeof smallname1
, len
));
4409 strncat(xfromname
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
);
4410 xfromname
[len
-1] = '\0';
4412 /* Get destination attribute file name. */
4413 len
= tcnp
->cn_namelen
+ 3;
4414 if (len
> sizeof(smallname2
)) {
4415 MALLOC(xtoname
, char *, len
, M_TEMP
, M_WAITOK
);
4417 xtoname
= &smallname2
[0];
4419 strlcpy(xtoname
, "._", min(sizeof smallname2
, len
));
4420 strncat(xtoname
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
);
4421 xtoname
[len
-1] = '\0';
4424 * Look up source attribute file, keep reference on it if exists.
4425 * Note that we do the namei with the nameiop of RENAME, which is different than
4426 * in the rename syscall. It's OK if the source file does not exist, since this
4427 * is only for AppleDouble files.
4429 if (xfromname
!= NULL
) {
4430 MALLOC(fromnd
, struct nameidata
*, sizeof (struct nameidata
), M_TEMP
, M_WAITOK
);
4431 NDINIT(fromnd
, RENAME
, OP_RENAME
, NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
,
4432 UIO_SYSSPACE
, CAST_USER_ADDR_T(xfromname
), ctx
);
4433 fromnd
->ni_dvp
= fdvp
;
4434 error
= namei(fromnd
);
4437 * If there was an error looking up source attribute file,
4438 * we'll behave as if it didn't exist.
4442 if (fromnd
->ni_vp
) {
4443 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4444 src_attr_vp
= fromnd
->ni_vp
;
4446 if (fromnd
->ni_vp
->v_type
!= VREG
) {
4447 src_attr_vp
= NULLVP
;
4448 vnode_put(fromnd
->ni_vp
);
4452 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4453 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4454 * have a vnode here, so we drop our namei buffer for the source attribute file
4456 if (src_attr_vp
== NULLVP
) {
4464 _err
= VNOP_COMPOUND_RENAME(fdvp
, fvpp
, fcnp
, fvap
, tdvp
, tvpp
, tcnp
, tvap
, flags
, ctx
);
4466 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err
);
4470 _err
= VNOP_RENAME(fdvp
, *fvpp
, fcnp
, tdvp
, *tvpp
, tcnp
, ctx
);
4474 mac_vnode_notify_rename(ctx
, *fvpp
, tdvp
, tcnp
);
4478 * Rename any associated extended attribute file (._ AppleDouble file).
4480 if (_err
== 0 && !NATIVE_XATTR(fdvp
) && xfromname
!= NULL
) {
4484 * Get destination attribute file vnode.
4485 * Note that tdvp already has an iocount reference. Make sure to check that we
4486 * get a valid vnode from namei.
4488 MALLOC(tond
, struct nameidata
*, sizeof(struct nameidata
), M_TEMP
, M_WAITOK
);
4489 NDINIT(tond
, RENAME
, OP_RENAME
,
4490 NOCACHE
| NOFOLLOW
| USEDVP
| CN_NBMOUNTLOOK
, UIO_SYSSPACE
,
4491 CAST_USER_ADDR_T(xtoname
), ctx
);
4492 tond
->ni_dvp
= tdvp
;
4493 error
= namei(tond
);
4499 dst_attr_vp
= tond
->ni_vp
;
4503 const char *old_name
= src_attr_vp
->v_name
;
4504 vnode_t old_parent
= src_attr_vp
->v_parent
;
4507 error
= VNOP_COMPOUND_RENAME(fdvp
, &src_attr_vp
, &fromnd
->ni_cnd
, NULL
,
4508 tdvp
, &dst_attr_vp
, &tond
->ni_cnd
, NULL
,
4511 error
= VNOP_RENAME(fdvp
, src_attr_vp
, &fromnd
->ni_cnd
,
4512 tdvp
, dst_attr_vp
, &tond
->ni_cnd
, ctx
);
4515 if (error
== 0 && old_name
== src_attr_vp
->v_name
&&
4516 old_parent
== src_attr_vp
->v_parent
) {
4517 int update_flags
= VNODE_UPDATE_NAME
;
4520 update_flags
|= VNODE_UPDATE_PARENT
;
4522 vnode_update_identity(src_attr_vp
, tdvp
,
4523 tond
->ni_cnd
.cn_nameptr
,
4524 tond
->ni_cnd
.cn_namelen
,
4525 tond
->ni_cnd
.cn_hash
,
4529 /* kevent notifications for moving resource files
4530 * _err is zero if we're here, so no need to notify directories, code
4531 * below will do that. only need to post the rename on the source and
4532 * possibly a delete on the dest
4534 post_event_if_success(src_attr_vp
, error
, NOTE_RENAME
);
4536 post_event_if_success(dst_attr_vp
, error
, NOTE_DELETE
);
4539 } else if (dst_attr_vp
) {
4541 * Just delete destination attribute file vnode if it exists, since
4542 * we didn't have a source attribute file.
4543 * Note that tdvp already has an iocount reference.
4546 struct vnop_remove_args args
;
4548 args
.a_desc
= &vnop_remove_desc
;
4550 args
.a_vp
= dst_attr_vp
;
4551 args
.a_cnp
= &tond
->ni_cnd
;
4552 args
.a_context
= ctx
;
4554 #if CONFIG_VFS_FUNNEL
4555 if (fdvp_unsafe
!= NULLVP
)
4556 error
= lock_fsnode(dst_attr_vp
, NULL
);
4557 #endif /* CONFIG_VFS_FUNNEL */
4559 error
= (*tdvp
->v_op
[vnop_remove_desc
.vdesc_offset
])(&args
);
4561 #if CONFIG_VFS_FUNNEL
4562 if (fdvp_unsafe
!= NULLVP
)
4563 unlock_fsnode(dst_attr_vp
, NULL
);
4564 #endif /* CONFIG_VFS_FUNNEL */
4567 vnode_setneedinactive(dst_attr_vp
);
4570 /* kevent notification for deleting the destination's attribute file
4571 * if it existed. Only need to post the delete on the destination, since
4572 * the code below will handle the directories.
4574 post_event_if_success(dst_attr_vp
, error
, NOTE_DELETE
);
4579 vnode_put(src_attr_vp
);
4583 vnode_put(dst_attr_vp
);
4587 FREE(fromnd
, M_TEMP
);
4592 if (xfromname
&& xfromname
!= &smallname1
[0]) {
4593 FREE(xfromname
, M_TEMP
);
4595 if (xtoname
&& xtoname
!= &smallname2
[0]) {
4596 FREE(xtoname
, M_TEMP
);
4606 *#% rename fdvp U U U
4607 *#% rename fvp U U U
4608 *#% rename tdvp L U U
4609 *#% rename tvp X U U
4612 struct vnop_rename_args
{
4613 struct vnodeop_desc
*a_desc
;
4616 struct componentname
*a_fcnp
;
4619 struct componentname
*a_tcnp
;
4620 vfs_context_t a_context
;
4624 VNOP_RENAME(struct vnode
*fdvp
, struct vnode
*fvp
, struct componentname
*fcnp
,
4625 struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
4630 struct vnop_rename_args a
;
4631 #if CONFIG_VFS_FUNNEL
4632 int funnel_state
= 0;
4633 vnode_t lock_first
= NULL
, lock_second
= NULL
;
4634 vnode_t fdvp_unsafe
= NULLVP
;
4635 vnode_t tdvp_unsafe
= NULLVP
;
4636 #endif /* CONFIG_VFS_FUNNEL */
4638 a
.a_desc
= &vnop_rename_desc
;
4647 #if CONFIG_VFS_FUNNEL
4648 if (!THREAD_SAFE_FS(fdvp
))
4650 if (!THREAD_SAFE_FS(tdvp
))
4653 if (fdvp_unsafe
!= NULLVP
) {
4655 * Lock parents in vnode address order to avoid deadlocks
4656 * note that it's possible for the fdvp to be unsafe,
4657 * but the tdvp to be safe because tvp could be a directory
4658 * in the root of a filesystem... in that case, tdvp is the
4659 * in the filesystem that this root is mounted on
4661 if (tdvp_unsafe
== NULL
|| fdvp_unsafe
== tdvp_unsafe
) {
4662 lock_first
= fdvp_unsafe
;
4664 } else if (fdvp_unsafe
< tdvp_unsafe
) {
4665 lock_first
= fdvp_unsafe
;
4666 lock_second
= tdvp_unsafe
;
4668 lock_first
= tdvp_unsafe
;
4669 lock_second
= fdvp_unsafe
;
4671 if ( (_err
= lock_fsnode(lock_first
, &funnel_state
)) )
4674 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
4675 unlock_fsnode(lock_first
, &funnel_state
);
4680 * Lock both children in vnode address order to avoid deadlocks
4682 if (tvp
== NULL
|| tvp
== fvp
) {
4685 } else if (fvp
< tvp
) {
4692 if ( (_err
= lock_fsnode(lock_first
, NULL
)) )
4695 if (lock_second
!= NULL
&& (_err
= lock_fsnode(lock_second
, NULL
))) {
4696 unlock_fsnode(lock_first
, NULL
);
4700 #endif /* CONFIG_VFS_FUNNEL */
4702 /* do the rename of the main file. */
4703 _err
= (*fdvp
->v_op
[vnop_rename_desc
.vdesc_offset
])(&a
);
4705 #if CONFIG_VFS_FUNNEL
4706 if (fdvp_unsafe
!= NULLVP
) {
4707 if (lock_second
!= NULL
)
4708 unlock_fsnode(lock_second
, NULL
);
4709 unlock_fsnode(lock_first
, NULL
);
4711 #endif /* CONFIG_VFS_FUNNEL */
4714 if (tvp
&& tvp
!= fvp
)
4715 vnode_setneedinactive(tvp
);
4718 #if CONFIG_VFS_FUNNEL
4720 if (fdvp_unsafe
!= NULLVP
) {
4721 if (tdvp_unsafe
!= NULLVP
)
4722 unlock_fsnode(tdvp_unsafe
, NULL
);
4723 unlock_fsnode(fdvp_unsafe
, &funnel_state
);
4725 #endif /* CONFIG_VFS_FUNNEL */
4727 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4729 events
= NOTE_WRITE
;
4730 if (vnode_isdir(fvp
)) {
4731 /* Link count on dir changed only if we are moving a dir and...
4732 * --Moved to new dir, not overwriting there
4733 * --Kept in same dir and DID overwrite
4735 if (((fdvp
!= tdvp
) && (!tvp
)) || ((fdvp
== tdvp
) && (tvp
))) {
4736 events
|= NOTE_LINK
;
4740 lock_vnode_and_post(fdvp
, events
);
4742 lock_vnode_and_post(tdvp
, events
);
4745 /* If you're replacing the target, post a deletion for it */
4748 lock_vnode_and_post(tvp
, NOTE_DELETE
);
4751 lock_vnode_and_post(fvp
, NOTE_RENAME
);
4758 VNOP_COMPOUND_RENAME(
4759 struct vnode
*fdvp
, struct vnode
**fvpp
, struct componentname
*fcnp
, struct vnode_attr
*fvap
,
4760 struct vnode
*tdvp
, struct vnode
**tvpp
, struct componentname
*tcnp
, struct vnode_attr
*tvap
,
4761 uint32_t flags
, vfs_context_t ctx
)
4765 struct vnop_compound_rename_args a
;
4768 no_fvp
= (*fvpp
) == NULLVP
;
4769 no_tvp
= (*tvpp
) == NULLVP
;
4771 a
.a_desc
= &vnop_compound_rename_desc
;
4785 a
.a_rename_authorizer
= vn_authorize_rename
;
4786 a
.a_reserved
= NULL
;
4788 /* do the rename of the main file. */
4789 _err
= (*fdvp
->v_op
[vnop_compound_rename_desc
.vdesc_offset
])(&a
);
4792 if (*tvpp
&& *tvpp
!= *fvpp
)
4793 vnode_setneedinactive(*tvpp
);
4796 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4797 if (0 == _err
&& *fvpp
!= *tvpp
) {
4799 panic("No fvpp after compound rename?");
4802 events
= NOTE_WRITE
;
4803 if (vnode_isdir(*fvpp
)) {
4804 /* Link count on dir changed only if we are moving a dir and...
4805 * --Moved to new dir, not overwriting there
4806 * --Kept in same dir and DID overwrite
4808 if (((fdvp
!= tdvp
) && (!*tvpp
)) || ((fdvp
== tdvp
) && (*tvpp
))) {
4809 events
|= NOTE_LINK
;
4813 lock_vnode_and_post(fdvp
, events
);
4815 lock_vnode_and_post(tdvp
, events
);
4818 /* If you're replacing the target, post a deletion for it */
4821 lock_vnode_and_post(*tvpp
, NOTE_DELETE
);
4824 lock_vnode_and_post(*fvpp
, NOTE_RENAME
);
4828 lookup_compound_vnop_post_hook(_err
, fdvp
, *fvpp
, fcnp
->cn_ndp
, 0);
4830 if (no_tvp
&& *tvpp
!= NULLVP
) {
4831 lookup_compound_vnop_post_hook(_err
, tdvp
, *tvpp
, tcnp
->cn_ndp
, 0);
4834 if (_err
&& _err
!= EKEEPLOOKING
) {
4849 vn_mkdir(struct vnode
*dvp
, struct vnode
**vpp
, struct nameidata
*ndp
,
4850 struct vnode_attr
*vap
, vfs_context_t ctx
)
4852 if (ndp
->ni_cnd
.cn_nameiop
!= CREATE
) {
4853 panic("Non-CREATE nameiop in vn_mkdir()?");
4856 if (vnode_compound_mkdir_available(dvp
)) {
4857 return VNOP_COMPOUND_MKDIR(dvp
, vpp
, ndp
, vap
, ctx
);
4859 return VNOP_MKDIR(dvp
, vpp
, &ndp
->ni_cnd
, vap
, ctx
);
4870 struct vnop_mkdir_args
{
4871 struct vnodeop_desc
*a_desc
;
4874 struct componentname
*a_cnp
;
4875 struct vnode_attr
*a_vap
;
4876 vfs_context_t a_context
;
4880 VNOP_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
4881 struct vnode_attr
*vap
, vfs_context_t ctx
)
4884 struct vnop_mkdir_args a
;
4885 #if CONFIG_VFS_FUNNEL
4887 int funnel_state
= 0;
4888 #endif /* CONFIG_VFS_FUNNEL */
4890 a
.a_desc
= &vnop_mkdir_desc
;
4897 #if CONFIG_VFS_FUNNEL
4898 thread_safe
= THREAD_SAFE_FS(dvp
);
4900 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
4904 #endif /* CONFIG_VFS_FUNNEL */
4906 _err
= (*dvp
->v_op
[vnop_mkdir_desc
.vdesc_offset
])(&a
);
4907 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
4909 * Remove stale Apple Double file (if any).
4911 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 0);
4914 #if CONFIG_VFS_FUNNEL
4916 unlock_fsnode(dvp
, &funnel_state
);
4918 #endif /* CONFIG_VFS_FUNNEL */
4920 post_event_if_success(dvp
, _err
, NOTE_LINK
| NOTE_WRITE
);
4926 VNOP_COMPOUND_MKDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct nameidata
*ndp
,
4927 struct vnode_attr
*vap
, vfs_context_t ctx
)
4930 struct vnop_compound_mkdir_args a
;
4932 a
.a_desc
= &vnop_compound_mkdir_desc
;
4935 a
.a_cnp
= &ndp
->ni_cnd
;
4940 a
.a_mkdir_authorizer
= vn_authorize_mkdir
;
4942 a
.a_reserved
= NULL
;
4944 _err
= (*dvp
->v_op
[vnop_compound_mkdir_desc
.vdesc_offset
])(&a
);
4945 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
4947 * Remove stale Apple Double file (if any).
4949 xattrfile_remove(dvp
, ndp
->ni_cnd
.cn_nameptr
, ctx
, 0);
4952 post_event_if_success(dvp
, _err
, NOTE_LINK
| NOTE_WRITE
);
4954 lookup_compound_vnop_post_hook(_err
, dvp
, *vpp
, ndp
, (_err
== 0));
4955 if (*vpp
&& _err
&& _err
!= EKEEPLOOKING
) {
4964 vn_rmdir(vnode_t dvp
, vnode_t
*vpp
, struct nameidata
*ndp
, struct vnode_attr
*vap
, vfs_context_t ctx
)
4966 if (vnode_compound_rmdir_available(dvp
)) {
4967 return VNOP_COMPOUND_RMDIR(dvp
, vpp
, ndp
, vap
, ctx
);
4969 if (*vpp
== NULLVP
) {
4970 panic("NULL vp, but not a compound VNOP?");
4973 panic("Non-NULL vap, but not a compound VNOP?");
4975 return VNOP_RMDIR(dvp
, *vpp
, &ndp
->ni_cnd
, ctx
);
4986 struct vnop_rmdir_args
{
4987 struct vnodeop_desc
*a_desc
;
4990 struct componentname
*a_cnp
;
4991 vfs_context_t a_context
;
4996 VNOP_RMDIR(struct vnode
*dvp
, struct vnode
*vp
, struct componentname
*cnp
, vfs_context_t ctx
)
4999 struct vnop_rmdir_args a
;
5000 #if CONFIG_VFS_FUNNEL
5002 int funnel_state
= 0;
5003 #endif /* CONFIG_VFS_FUNNEL */
5005 a
.a_desc
= &vnop_rmdir_desc
;
5011 #if CONFIG_VFS_FUNNEL
5012 thread_safe
= THREAD_SAFE_FS(dvp
);
5014 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5018 #endif /* CONFIG_VFS_FUNNEL */
5020 _err
= (*vp
->v_op
[vnop_rmdir_desc
.vdesc_offset
])(&a
);
5023 vnode_setneedinactive(vp
);
5025 if ( !(NATIVE_XATTR(dvp
)) ) {
5027 * Remove any associated extended attribute file (._ AppleDouble file).
5029 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 1);
5033 #if CONFIG_VFS_FUNNEL
5035 unlock_fsnode(vp
, &funnel_state
);
5037 #endif /* CONFIG_VFS_FUNNEL */
5039 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
5040 post_event_if_success(vp
, _err
, NOTE_DELETE
| NOTE_LINK
);
5041 post_event_if_success(dvp
, _err
, NOTE_LINK
| NOTE_WRITE
);
5047 VNOP_COMPOUND_RMDIR(struct vnode
*dvp
, struct vnode
**vpp
, struct nameidata
*ndp
,
5048 struct vnode_attr
*vap
, vfs_context_t ctx
)
5051 struct vnop_compound_rmdir_args a
;
5054 a
.a_desc
= &vnop_mkdir_desc
;
5057 a
.a_cnp
= &ndp
->ni_cnd
;
5061 a
.a_rmdir_authorizer
= vn_authorize_rmdir
;
5062 a
.a_reserved
= NULL
;
5064 no_vp
= (*vpp
== NULLVP
);
5066 _err
= (*dvp
->v_op
[vnop_compound_rmdir_desc
.vdesc_offset
])(&a
);
5067 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
5069 * Remove stale Apple Double file (if any).
5071 xattrfile_remove(dvp
, ndp
->ni_cnd
.cn_nameptr
, ctx
, 0);
5075 post_event_if_success(*vpp
, _err
, NOTE_DELETE
| NOTE_LINK
);
5077 post_event_if_success(dvp
, _err
, NOTE_LINK
| NOTE_WRITE
);
5080 lookup_compound_vnop_post_hook(_err
, dvp
, *vpp
, ndp
, 0);
5082 #if 0 /* Removing orphaned ._ files requires a vp.... */
5083 if (*vpp
&& _err
&& _err
!= EKEEPLOOKING
) {
5094 * Remove a ._ AppleDouble file
5096 #define AD_STALE_SECS (180)
5098 xattrfile_remove(vnode_t dvp
, const char * basename
, vfs_context_t ctx
, int force
)
5101 struct nameidata nd
;
5103 char *filename
= NULL
;
5106 if ((basename
== NULL
) || (basename
[0] == '\0') ||
5107 (basename
[0] == '.' && basename
[1] == '_')) {
5110 filename
= &smallname
[0];
5111 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
5112 if (len
>= sizeof(smallname
)) {
5113 len
++; /* snprintf result doesn't include '\0' */
5114 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
5115 len
= snprintf(filename
, len
, "._%s", basename
);
5117 NDINIT(&nd
, DELETE
, OP_UNLINK
, WANTPARENT
| LOCKLEAF
| NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
5118 CAST_USER_ADDR_T(filename
), ctx
);
5120 if (namei(&nd
) != 0)
5125 if (xvp
->v_type
!= VREG
)
5129 * When creating a new object and a "._" file already
5130 * exists, check to see if its a stale "._" file.
5134 struct vnode_attr va
;
5137 VATTR_WANTED(&va
, va_data_size
);
5138 VATTR_WANTED(&va
, va_modify_time
);
5139 if (VNOP_GETATTR(xvp
, &va
, ctx
) == 0 &&
5140 VATTR_IS_SUPPORTED(&va
, va_data_size
) &&
5141 VATTR_IS_SUPPORTED(&va
, va_modify_time
) &&
5142 va
.va_data_size
!= 0) {
5146 if ((tv
.tv_sec
> va
.va_modify_time
.tv_sec
) &&
5147 (tv
.tv_sec
- va
.va_modify_time
.tv_sec
) > AD_STALE_SECS
) {
5148 force
= 1; /* must be stale */
5155 error
= VNOP_REMOVE(dvp
, xvp
, &nd
.ni_cnd
, 0, ctx
);
5157 vnode_setneedinactive(xvp
);
5159 post_event_if_success(xvp
, error
, NOTE_DELETE
);
5160 post_event_if_success(dvp
, error
, NOTE_WRITE
);
5167 if (filename
&& filename
!= &smallname
[0]) {
5168 FREE(filename
, M_TEMP
);
5173 * Shadow uid/gid/mod to a ._ AppleDouble file
5176 xattrfile_setattr(vnode_t dvp
, const char * basename
, struct vnode_attr
* vap
,
5180 struct nameidata nd
;
5182 char *filename
= NULL
;
5185 if ((dvp
== NULLVP
) ||
5186 (basename
== NULL
) || (basename
[0] == '\0') ||
5187 (basename
[0] == '.' && basename
[1] == '_')) {
5190 filename
= &smallname
[0];
5191 len
= snprintf(filename
, sizeof(smallname
), "._%s", basename
);
5192 if (len
>= sizeof(smallname
)) {
5193 len
++; /* snprintf result doesn't include '\0' */
5194 MALLOC(filename
, char *, len
, M_TEMP
, M_WAITOK
);
5195 len
= snprintf(filename
, len
, "._%s", basename
);
5197 NDINIT(&nd
, LOOKUP
, OP_SETATTR
, NOFOLLOW
| USEDVP
, UIO_SYSSPACE
,
5198 CAST_USER_ADDR_T(filename
), ctx
);
5200 if (namei(&nd
) != 0)
5206 if (xvp
->v_type
== VREG
) {
5207 #if CONFIG_VFS_FUNNEL
5208 int thread_safe
= THREAD_SAFE_FS(dvp
);
5209 #endif /* CONFIG_VFS_FUNNEL */
5210 struct vnop_setattr_args a
;
5212 a
.a_desc
= &vnop_setattr_desc
;
5217 #if CONFIG_VFS_FUNNEL
5219 if ( (lock_fsnode(xvp
, NULL
)) )
5222 #endif /* CONFIG_VFS_FUNNEL */
5224 (void) (*xvp
->v_op
[vnop_setattr_desc
.vdesc_offset
])(&a
);
5226 #if CONFIG_VFS_FUNNEL
5228 unlock_fsnode(xvp
, NULL
);
5230 #endif /* CONFIG_VFS_FUNNEL */
5234 #if CONFIG_VFS_FUNNEL
5236 #endif /* CONFIG_VFS_FUNNEL */
5240 if (filename
&& filename
!= &smallname
[0]) {
5241 FREE(filename
, M_TEMP
);
5248 *#% symlink dvp L U U
5249 *#% symlink vpp - U -
5252 struct vnop_symlink_args
{
5253 struct vnodeop_desc
*a_desc
;
5256 struct componentname
*a_cnp
;
5257 struct vnode_attr
*a_vap
;
5259 vfs_context_t a_context
;
5264 VNOP_SYMLINK(struct vnode
*dvp
, struct vnode
**vpp
, struct componentname
*cnp
,
5265 struct vnode_attr
*vap
, char *target
, vfs_context_t ctx
)
5268 struct vnop_symlink_args a
;
5269 #if CONFIG_VFS_FUNNEL
5271 int funnel_state
= 0;
5272 #endif /* CONFIG_VFS_FUNNEL */
5274 a
.a_desc
= &vnop_symlink_desc
;
5279 a
.a_target
= target
;
5282 #if CONFIG_VFS_FUNNEL
5283 thread_safe
= THREAD_SAFE_FS(dvp
);
5285 if ( (_err
= lock_fsnode(dvp
, &funnel_state
)) ) {
5289 #endif /* CONFIG_VFS_FUNNEL */
5291 _err
= (*dvp
->v_op
[vnop_symlink_desc
.vdesc_offset
])(&a
);
5292 if (_err
== 0 && !NATIVE_XATTR(dvp
)) {
5294 * Remove stale Apple Double file (if any). Posts its own knotes
5296 xattrfile_remove(dvp
, cnp
->cn_nameptr
, ctx
, 0);
5299 #if CONFIG_VFS_FUNNEL
5301 unlock_fsnode(dvp
, &funnel_state
);
5303 #endif /* CONFIG_VFS_FUNNEL */
5305 post_event_if_success(dvp
, _err
, NOTE_WRITE
);
5313 *#% readdir vp L L L
5316 struct vnop_readdir_args
{
5317 struct vnodeop_desc
*a_desc
;
5323 vfs_context_t a_context
;
5328 VNOP_READDIR(struct vnode
*vp
, struct uio
*uio
, int flags
, int *eofflag
,
5329 int *numdirent
, vfs_context_t ctx
)
5332 struct vnop_readdir_args a
;
5333 #if CONFIG_VFS_FUNNEL
5335 int funnel_state
= 0;
5336 #endif /* CONFIG_VFS_FUNNEL */
5338 a
.a_desc
= &vnop_readdir_desc
;
5342 a
.a_eofflag
= eofflag
;
5343 a
.a_numdirent
= numdirent
;
5345 #if CONFIG_VFS_FUNNEL
5346 thread_safe
= THREAD_SAFE_FS(vp
);
5349 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5353 #endif /* CONFIG_VFS_FUNNEL */
5355 _err
= (*vp
->v_op
[vnop_readdir_desc
.vdesc_offset
])(&a
);
5357 #if CONFIG_VFS_FUNNEL
5359 unlock_fsnode(vp
, &funnel_state
);
5361 #endif /* CONFIG_VFS_FUNNEL */
5368 *#% readdirattr vp L L L
5371 struct vnop_readdirattr_args
{
5372 struct vnodeop_desc
*a_desc
;
5374 struct attrlist
*a_alist
;
5376 uint32_t a_maxcount
;
5378 uint32_t *a_newstate
;
5380 uint32_t *a_actualcount
;
5381 vfs_context_t a_context
;
5386 VNOP_READDIRATTR(struct vnode
*vp
, struct attrlist
*alist
, struct uio
*uio
, uint32_t maxcount
,
5387 uint32_t options
, uint32_t *newstate
, int *eofflag
, uint32_t *actualcount
, vfs_context_t ctx
)
5390 struct vnop_readdirattr_args a
;
5391 #if CONFIG_VFS_FUNNEL
5393 int funnel_state
= 0;
5394 #endif /* CONFIG_VFS_FUNNEL */
5396 a
.a_desc
= &vnop_readdirattr_desc
;
5400 a
.a_maxcount
= maxcount
;
5401 a
.a_options
= options
;
5402 a
.a_newstate
= newstate
;
5403 a
.a_eofflag
= eofflag
;
5404 a
.a_actualcount
= actualcount
;
5407 #if CONFIG_VFS_FUNNEL
5408 thread_safe
= THREAD_SAFE_FS(vp
);
5410 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5414 #endif /* CONFIG_VFS_FUNNEL */
5416 _err
= (*vp
->v_op
[vnop_readdirattr_desc
.vdesc_offset
])(&a
);
5418 #if CONFIG_VFS_FUNNEL
5420 unlock_fsnode(vp
, &funnel_state
);
5422 #endif /* CONFIG_VFS_FUNNEL */
5430 *#% readlink vp L L L
5433 struct vnop_readlink_args
{
5434 struct vnodeop_desc
*a_desc
;
5437 vfs_context_t a_context
;
5442 * Returns: 0 Success
5443 * lock_fsnode:ENOENT No such file or directory [only for VFS
5444 * that is not thread safe & vnode is
5445 * currently being/has been terminated]
5446 * <vfs_readlink>:EINVAL
5447 * <vfs_readlink>:???
5449 * Note: The return codes from the underlying VFS's readlink routine
5450 * can't be fully enumerated here, since third party VFS authors
5451 * may not limit their error returns to the ones documented here,
5452 * even though this may result in some programs functioning
5455 * The return codes documented above are those which may currently
5456 * be returned by HFS from hfs_vnop_readlink, not including
5457 * additional error code which may be propagated from underlying
5461 VNOP_READLINK(struct vnode
*vp
, struct uio
*uio
, vfs_context_t ctx
)
5464 struct vnop_readlink_args a
;
5465 #if CONFIG_VFS_FUNNEL
5467 int funnel_state
= 0;
5468 #endif /* CONFIG_VFS_FUNNEL */
5470 a
.a_desc
= &vnop_readlink_desc
;
5475 #if CONFIG_VFS_FUNNEL
5476 thread_safe
= THREAD_SAFE_FS(vp
);
5478 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5482 #endif /* CONFIG_VFS_FUNNEL */
5484 _err
= (*vp
->v_op
[vnop_readlink_desc
.vdesc_offset
])(&a
);
5486 #if CONFIG_VFS_FUNNEL
5488 unlock_fsnode(vp
, &funnel_state
);
5490 #endif /* CONFIG_VFS_FUNNEL */
5498 *#% inactive vp L U U
5501 struct vnop_inactive_args
{
5502 struct vnodeop_desc
*a_desc
;
5504 vfs_context_t a_context
;
5508 VNOP_INACTIVE(struct vnode
*vp
, vfs_context_t ctx
)
5511 struct vnop_inactive_args a
;
5512 #if CONFIG_VFS_FUNNEL
5514 int funnel_state
= 0;
5515 #endif /* CONFIG_VFS_FUNNEL */
5517 a
.a_desc
= &vnop_inactive_desc
;
5521 #if CONFIG_VFS_FUNNEL
5522 thread_safe
= THREAD_SAFE_FS(vp
);
5524 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5528 #endif /* CONFIG_VFS_FUNNEL */
5530 _err
= (*vp
->v_op
[vnop_inactive_desc
.vdesc_offset
])(&a
);
5532 #if CONFIG_VFS_FUNNEL
5534 unlock_fsnode(vp
, &funnel_state
);
5536 #endif /* CONFIG_VFS_FUNNEL */
5539 /* For file systems that do not support namedstream natively, mark
5540 * the shadow stream file vnode to be recycled as soon as the last
5541 * reference goes away. To avoid re-entering reclaim code, do not
5542 * call recycle on terminating namedstream vnodes.
5544 if (vnode_isnamedstream(vp
) &&
5545 (vp
->v_parent
!= NULLVP
) &&
5546 vnode_isshadow(vp
) &&
5547 ((vp
->v_lflag
& VL_TERMINATE
) == 0)) {
5559 *#% reclaim vp U U U
5562 struct vnop_reclaim_args
{
5563 struct vnodeop_desc
*a_desc
;
5565 vfs_context_t a_context
;
5569 VNOP_RECLAIM(struct vnode
*vp
, vfs_context_t ctx
)
5572 struct vnop_reclaim_args a
;
5573 #if CONFIG_VFS_FUNNEL
5575 int funnel_state
= 0;
5576 #endif /* CONFIG_VFS_FUNNEL */
5578 a
.a_desc
= &vnop_reclaim_desc
;
5582 #if CONFIG_VFS_FUNNEL
5583 thread_safe
= THREAD_SAFE_FS(vp
);
5585 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5587 #endif /* CONFIG_VFS_FUNNEL */
5589 _err
= (*vp
->v_op
[vnop_reclaim_desc
.vdesc_offset
])(&a
);
5591 #if CONFIG_VFS_FUNNEL
5593 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5595 #endif /* CONFIG_VFS_FUNNEL */
5602 * Returns: 0 Success
5603 * lock_fsnode:ENOENT No such file or directory [only for VFS
5604 * that is not thread safe & vnode is
5605 * currently being/has been terminated]
5606 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5611 *#% pathconf vp L L L
5614 struct vnop_pathconf_args
{
5615 struct vnodeop_desc
*a_desc
;
5619 vfs_context_t a_context
;
5623 VNOP_PATHCONF(struct vnode
*vp
, int name
, int32_t *retval
, vfs_context_t ctx
)
5626 struct vnop_pathconf_args a
;
5627 #if CONFIG_VFS_FUNNEL
5629 int funnel_state
= 0;
5630 #endif /* CONFIG_VFS_FUNNEL */
5632 a
.a_desc
= &vnop_pathconf_desc
;
5635 a
.a_retval
= retval
;
5638 #if CONFIG_VFS_FUNNEL
5639 thread_safe
= THREAD_SAFE_FS(vp
);
5641 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5645 #endif /* CONFIG_VFS_FUNNEL */
5647 _err
= (*vp
->v_op
[vnop_pathconf_desc
.vdesc_offset
])(&a
);
5649 #if CONFIG_VFS_FUNNEL
5651 unlock_fsnode(vp
, &funnel_state
);
5653 #endif /* CONFIG_VFS_FUNNEL */
5659 * Returns: 0 Success
5660 * err_advlock:ENOTSUP
5662 * <vnop_advlock_desc>:???
5664 * Notes: VFS implementations of advisory locking using calls through
5665 * <vnop_advlock_desc> because lock enforcement does not occur
5666 * locally should try to limit themselves to the return codes
5667 * documented above for lf_advlock and err_advlock.
5672 *#% advlock vp U U U
5675 struct vnop_advlock_args
{
5676 struct vnodeop_desc
*a_desc
;
5682 vfs_context_t a_context
;
5686 VNOP_ADVLOCK(struct vnode
*vp
, caddr_t id
, int op
, struct flock
*fl
, int flags
, vfs_context_t ctx
)
5689 struct vnop_advlock_args a
;
5690 #if CONFIG_VFS_FUNNEL
5692 int funnel_state
= 0;
5693 #endif /* CONFIG_VFS_FUNNEL */
5695 a
.a_desc
= &vnop_advlock_desc
;
5703 #if CONFIG_VFS_FUNNEL
5704 thread_safe
= THREAD_SAFE_FS(vp
);
5706 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5708 #endif /* CONFIG_VFS_FUNNEL */
5710 /* Disallow advisory locking on non-seekable vnodes */
5711 if (vnode_isfifo(vp
)) {
5712 _err
= err_advlock(&a
);
5714 if ((vp
->v_flag
& VLOCKLOCAL
)) {
5715 /* Advisory locking done at this layer */
5716 _err
= lf_advlock(&a
);
5718 /* Advisory locking done by underlying filesystem */
5719 _err
= (*vp
->v_op
[vnop_advlock_desc
.vdesc_offset
])(&a
);
5723 #if CONFIG_VFS_FUNNEL
5725 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5727 #endif /* CONFIG_VFS_FUNNEL */
5737 *#% allocate vp L L L
5740 struct vnop_allocate_args
{
5741 struct vnodeop_desc
*a_desc
;
5745 off_t
*a_bytesallocated
;
5747 vfs_context_t a_context
;
5752 VNOP_ALLOCATE(struct vnode
*vp
, off_t length
, u_int32_t flags
, off_t
*bytesallocated
, off_t offset
, vfs_context_t ctx
)
5755 struct vnop_allocate_args a
;
5756 #if CONFIG_VFS_FUNNEL
5758 int funnel_state
= 0;
5759 #endif /* CONFIG_VFS_FUNNEL */
5761 a
.a_desc
= &vnop_allocate_desc
;
5763 a
.a_length
= length
;
5765 a
.a_bytesallocated
= bytesallocated
;
5766 a
.a_offset
= offset
;
5769 #if CONFIG_VFS_FUNNEL
5770 thread_safe
= THREAD_SAFE_FS(vp
);
5772 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5776 #endif /* CONFIG_VFS_FUNNEL */
5778 _err
= (*vp
->v_op
[vnop_allocate_desc
.vdesc_offset
])(&a
);
5781 add_fsevent(FSE_STAT_CHANGED
, ctx
, FSE_ARG_VNODE
, vp
, FSE_ARG_DONE
);
5785 #if CONFIG_VFS_FUNNEL
5787 unlock_fsnode(vp
, &funnel_state
);
5789 #endif /* CONFIG_VFS_FUNNEL */
5800 struct vnop_pagein_args
{
5801 struct vnodeop_desc
*a_desc
;
5804 upl_offset_t a_pl_offset
;
5808 vfs_context_t a_context
;
5812 VNOP_PAGEIN(struct vnode
*vp
, upl_t pl
, upl_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
5815 struct vnop_pagein_args a
;
5816 #if CONFIG_VFS_FUNNEL
5818 int funnel_state
= 0;
5819 #endif /* CONFIG_VFS_FUNNEL */
5821 a
.a_desc
= &vnop_pagein_desc
;
5824 a
.a_pl_offset
= pl_offset
;
5825 a
.a_f_offset
= f_offset
;
5830 #if CONFIG_VFS_FUNNEL
5831 thread_safe
= THREAD_SAFE_FS(vp
);
5833 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5835 #endif /* CONFIG_VFS_FUNNEL */
5837 _err
= (*vp
->v_op
[vnop_pagein_desc
.vdesc_offset
])(&a
);
5839 #if CONFIG_VFS_FUNNEL
5841 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5843 #endif /* CONFIG_VFS_FUNNEL */
5851 *#% pageout vp = = =
5854 struct vnop_pageout_args
{
5855 struct vnodeop_desc
*a_desc
;
5858 upl_offset_t a_pl_offset
;
5862 vfs_context_t a_context
;
5867 VNOP_PAGEOUT(struct vnode
*vp
, upl_t pl
, upl_offset_t pl_offset
, off_t f_offset
, size_t size
, int flags
, vfs_context_t ctx
)
5870 struct vnop_pageout_args a
;
5871 #if CONFIG_VFS_FUNNEL
5873 int funnel_state
= 0;
5874 #endif /* CONFIG_VFS_FUNNEL */
5876 a
.a_desc
= &vnop_pageout_desc
;
5879 a
.a_pl_offset
= pl_offset
;
5880 a
.a_f_offset
= f_offset
;
5885 #if CONFIG_VFS_FUNNEL
5886 thread_safe
= THREAD_SAFE_FS(vp
);
5888 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
5890 #endif /* CONFIG_VFS_FUNNEL */
5892 _err
= (*vp
->v_op
[vnop_pageout_desc
.vdesc_offset
])(&a
);
5894 #if CONFIG_VFS_FUNNEL
5896 (void) thread_funnel_set(kernel_flock
, funnel_state
);
5898 #endif /* CONFIG_VFS_FUNNEL */
5900 post_event_if_success(vp
, _err
, NOTE_WRITE
);
5906 vn_remove(vnode_t dvp
, vnode_t
*vpp
, struct nameidata
*ndp
, int32_t flags
, struct vnode_attr
*vap
, vfs_context_t ctx
)
5908 if (vnode_compound_remove_available(dvp
)) {
5909 return VNOP_COMPOUND_REMOVE(dvp
, vpp
, ndp
, flags
, vap
, ctx
);
5911 return VNOP_REMOVE(dvp
, *vpp
, &ndp
->ni_cnd
, flags
, ctx
);
5920 *#% searchfs vp L L L
5923 struct vnop_searchfs_args
{
5924 struct vnodeop_desc
*a_desc
;
5926 void *a_searchparams1
;
5927 void *a_searchparams2
;
5928 struct attrlist
*a_searchattrs
;
5929 uint32_t a_maxmatches
;
5930 struct timeval
*a_timelimit
;
5931 struct attrlist
*a_returnattrs
;
5932 uint32_t *a_nummatches
;
5933 uint32_t a_scriptcode
;
5936 struct searchstate
*a_searchstate
;
5937 vfs_context_t a_context
;
5942 VNOP_SEARCHFS(struct vnode
*vp
, void *searchparams1
, void *searchparams2
, struct attrlist
*searchattrs
, uint32_t maxmatches
, struct timeval
*timelimit
, struct attrlist
*returnattrs
, uint32_t *nummatches
, uint32_t scriptcode
, uint32_t options
, struct uio
*uio
, struct searchstate
*searchstate
, vfs_context_t ctx
)
5945 struct vnop_searchfs_args a
;
5946 #if CONFIG_VFS_FUNNEL
5948 int funnel_state
= 0;
5949 #endif /* CONFIG_VFS_FUNNEL */
5951 a
.a_desc
= &vnop_searchfs_desc
;
5953 a
.a_searchparams1
= searchparams1
;
5954 a
.a_searchparams2
= searchparams2
;
5955 a
.a_searchattrs
= searchattrs
;
5956 a
.a_maxmatches
= maxmatches
;
5957 a
.a_timelimit
= timelimit
;
5958 a
.a_returnattrs
= returnattrs
;
5959 a
.a_nummatches
= nummatches
;
5960 a
.a_scriptcode
= scriptcode
;
5961 a
.a_options
= options
;
5963 a
.a_searchstate
= searchstate
;
5966 #if CONFIG_VFS_FUNNEL
5967 thread_safe
= THREAD_SAFE_FS(vp
);
5969 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
5973 #endif /* CONFIG_VFS_FUNNEL */
5975 _err
= (*vp
->v_op
[vnop_searchfs_desc
.vdesc_offset
])(&a
);
5977 #if CONFIG_VFS_FUNNEL
5979 unlock_fsnode(vp
, &funnel_state
);
5981 #endif /* CONFIG_VFS_FUNNEL */
5985 #endif /* CONFIG_SEARCHFS */
5990 *#% copyfile fvp U U U
5991 *#% copyfile tdvp L U U
5992 *#% copyfile tvp X U U
5995 struct vnop_copyfile_args
{
5996 struct vnodeop_desc
*a_desc
;
6000 struct componentname
*a_tcnp
;
6003 vfs_context_t a_context
;
6007 VNOP_COPYFILE(struct vnode
*fvp
, struct vnode
*tdvp
, struct vnode
*tvp
, struct componentname
*tcnp
,
6008 int mode
, int flags
, vfs_context_t ctx
)
6011 struct vnop_copyfile_args a
;
6012 a
.a_desc
= &vnop_copyfile_desc
;
6020 _err
= (*fvp
->v_op
[vnop_copyfile_desc
.vdesc_offset
])(&a
);
6025 VNOP_GETXATTR(vnode_t vp
, const char *name
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
6027 struct vnop_getxattr_args a
;
6029 #if CONFIG_VFS_FUNNEL
6031 int funnel_state
= 0;
6032 #endif /* CONFIG_VFS_FUNNEL */
6034 a
.a_desc
= &vnop_getxattr_desc
;
6039 a
.a_options
= options
;
6042 #if CONFIG_VFS_FUNNEL
6043 thread_safe
= THREAD_SAFE_FS(vp
);
6045 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
6049 #endif /* CONFIG_VFS_FUNNEL */
6051 error
= (*vp
->v_op
[vnop_getxattr_desc
.vdesc_offset
])(&a
);
6053 #if CONFIG_VFS_FUNNEL
6055 unlock_fsnode(vp
, &funnel_state
);
6057 #endif /* CONFIG_VFS_FUNNEL */
6063 VNOP_SETXATTR(vnode_t vp
, const char *name
, uio_t uio
, int options
, vfs_context_t ctx
)
6065 struct vnop_setxattr_args a
;
6067 #if CONFIG_VFS_FUNNEL
6069 int funnel_state
= 0;
6070 #endif /* CONFIG_VFS_FUNNEL */
6072 a
.a_desc
= &vnop_setxattr_desc
;
6076 a
.a_options
= options
;
6079 #if CONFIG_VFS_FUNNEL
6080 thread_safe
= THREAD_SAFE_FS(vp
);
6082 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
6086 #endif /* CONFIG_VFS_FUNNEL */
6088 error
= (*vp
->v_op
[vnop_setxattr_desc
.vdesc_offset
])(&a
);
6090 #if CONFIG_VFS_FUNNEL
6092 unlock_fsnode(vp
, &funnel_state
);
6094 #endif /* CONFIG_VFS_FUNNEL */
6097 vnode_uncache_authorized_action(vp
, KAUTH_INVALIDATE_CACHED_RIGHTS
);
6099 post_event_if_success(vp
, error
, NOTE_ATTRIB
);
6105 VNOP_REMOVEXATTR(vnode_t vp
, const char *name
, int options
, vfs_context_t ctx
)
6107 struct vnop_removexattr_args a
;
6109 #if CONFIG_VFS_FUNNEL
6111 int funnel_state
= 0;
6112 #endif /* CONFIG_VFS_FUNNEL */
6114 a
.a_desc
= &vnop_removexattr_desc
;
6117 a
.a_options
= options
;
6120 #if CONFIG_VFS_FUNNEL
6121 thread_safe
= THREAD_SAFE_FS(vp
);
6123 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
6127 #endif /* CONFIG_VFS_FUNNEL */
6129 error
= (*vp
->v_op
[vnop_removexattr_desc
.vdesc_offset
])(&a
);
6131 #if CONFIG_VFS_FUNNEL
6133 unlock_fsnode(vp
, &funnel_state
);
6135 #endif /* CONFIG_VFS_FUNNEL */
6137 post_event_if_success(vp
, error
, NOTE_ATTRIB
);
6143 VNOP_LISTXATTR(vnode_t vp
, uio_t uio
, size_t *size
, int options
, vfs_context_t ctx
)
6145 struct vnop_listxattr_args a
;
6147 #if CONFIG_VFS_FUNNEL
6149 int funnel_state
= 0;
6150 #endif /* CONFIG_VFS_FUNNEL */
6152 a
.a_desc
= &vnop_listxattr_desc
;
6156 a
.a_options
= options
;
6159 #if CONFIG_VFS_FUNNEL
6160 thread_safe
= THREAD_SAFE_FS(vp
);
6162 if ( (error
= lock_fsnode(vp
, &funnel_state
)) ) {
6166 #endif /* CONFIG_VFS_FUNNEL */
6168 error
= (*vp
->v_op
[vnop_listxattr_desc
.vdesc_offset
])(&a
);
6170 #if CONFIG_VFS_FUNNEL
6172 unlock_fsnode(vp
, &funnel_state
);
6174 #endif /* CONFIG_VFS_FUNNEL */
6183 *#% blktooff vp = = =
6186 struct vnop_blktooff_args
{
6187 struct vnodeop_desc
*a_desc
;
6194 VNOP_BLKTOOFF(struct vnode
*vp
, daddr64_t lblkno
, off_t
*offset
)
6197 struct vnop_blktooff_args a
;
6198 #if CONFIG_VFS_FUNNEL
6200 int funnel_state
= 0;
6201 #endif /* CONFIG_VFS_FUNNEL */
6203 a
.a_desc
= &vnop_blktooff_desc
;
6205 a
.a_lblkno
= lblkno
;
6206 a
.a_offset
= offset
;
6208 #if CONFIG_VFS_FUNNEL
6209 thread_safe
= THREAD_SAFE_FS(vp
);
6211 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
6213 #endif /* CONFIG_VFS_FUNNEL */
6215 _err
= (*vp
->v_op
[vnop_blktooff_desc
.vdesc_offset
])(&a
);
6217 #if CONFIG_VFS_FUNNEL
6219 (void) thread_funnel_set(kernel_flock
, funnel_state
);
6221 #endif /* CONFIG_VFS_FUNNEL */
6229 *#% offtoblk vp = = =
6232 struct vnop_offtoblk_args
{
6233 struct vnodeop_desc
*a_desc
;
6236 daddr64_t
*a_lblkno
;
6240 VNOP_OFFTOBLK(struct vnode
*vp
, off_t offset
, daddr64_t
*lblkno
)
6243 struct vnop_offtoblk_args a
;
6244 #if CONFIG_VFS_FUNNEL
6246 int funnel_state
= 0;
6247 #endif /* CONFIG_VFS_FUNNEL */
6249 a
.a_desc
= &vnop_offtoblk_desc
;
6251 a
.a_offset
= offset
;
6252 a
.a_lblkno
= lblkno
;
6254 #if CONFIG_VFS_FUNNEL
6255 thread_safe
= THREAD_SAFE_FS(vp
);
6257 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
6259 #endif /* CONFIG_VFS_FUNNEL */
6261 _err
= (*vp
->v_op
[vnop_offtoblk_desc
.vdesc_offset
])(&a
);
6263 #if CONFIG_VFS_FUNNEL
6265 (void) thread_funnel_set(kernel_flock
, funnel_state
);
6267 #endif /* CONFIG_VFS_FUNNEL */
6275 *#% blockmap vp L L L
6278 struct vnop_blockmap_args
{
6279 struct vnodeop_desc
*a_desc
;
6287 vfs_context_t a_context
;
6291 VNOP_BLOCKMAP(struct vnode
*vp
, off_t foffset
, size_t size
, daddr64_t
*bpn
, size_t *run
, void *poff
, int flags
, vfs_context_t ctx
)
6294 struct vnop_blockmap_args a
;
6295 size_t localrun
= 0;
6296 #if CONFIG_VFS_FUNNEL
6298 int funnel_state
= 0;
6299 #endif /* CONFIG_VFS_FUNNEL */
6302 ctx
= vfs_context_current();
6304 a
.a_desc
= &vnop_blockmap_desc
;
6306 a
.a_foffset
= foffset
;
6309 a
.a_run
= &localrun
;
6314 #if CONFIG_VFS_FUNNEL
6315 thread_safe
= THREAD_SAFE_FS(vp
);
6317 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
6319 #endif /* CONFIG_VFS_FUNNEL */
6321 _err
= (*vp
->v_op
[vnop_blockmap_desc
.vdesc_offset
])(&a
);
6323 #if CONFIG_VFS_FUNNEL
6325 (void) thread_funnel_set(kernel_flock
, funnel_state
);
6327 #endif /* CONFIG_VFS_FUNNEL */
6330 * We used a local variable to request information from the underlying
6331 * filesystem about the length of the I/O run in question. If
6332 * we get malformed output from the filesystem, we cap it to the length
6333 * requested, at most. Update 'run' on the way out.
6336 if (localrun
> size
) {
6349 struct vnop_strategy_args
{
6350 struct vnodeop_desc
*a_desc
;
6356 VNOP_STRATEGY(struct buf
*bp
)
6359 struct vnop_strategy_args a
;
6360 a
.a_desc
= &vnop_strategy_desc
;
6362 _err
= (*buf_vnode(bp
)->v_op
[vnop_strategy_desc
.vdesc_offset
])(&a
);
6367 struct vnop_bwrite_args
{
6368 struct vnodeop_desc
*a_desc
;
6373 VNOP_BWRITE(struct buf
*bp
)
6376 struct vnop_bwrite_args a
;
6377 a
.a_desc
= &vnop_bwrite_desc
;
6379 _err
= (*buf_vnode(bp
)->v_op
[vnop_bwrite_desc
.vdesc_offset
])(&a
);
6384 struct vnop_kqfilt_add_args
{
6385 struct vnodeop_desc
*a_desc
;
6388 vfs_context_t a_context
;
6392 VNOP_KQFILT_ADD(struct vnode
*vp
, struct knote
*kn
, vfs_context_t ctx
)
6395 struct vnop_kqfilt_add_args a
;
6396 #if CONFIG_VFS_FUNNEL
6398 int funnel_state
= 0;
6399 #endif /* CONFIG_VFS_FUNNEL */
6401 a
.a_desc
= VDESC(vnop_kqfilt_add
);
6406 #if CONFIG_VFS_FUNNEL
6407 thread_safe
= THREAD_SAFE_FS(vp
);
6409 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
6413 #endif /* CONFIG_VFS_FUNNEL */
6415 _err
= (*vp
->v_op
[vnop_kqfilt_add_desc
.vdesc_offset
])(&a
);
6417 #if CONFIG_VFS_FUNNEL
6419 unlock_fsnode(vp
, &funnel_state
);
6421 #endif /* CONFIG_VFS_FUNNEL */
6427 struct vnop_kqfilt_remove_args
{
6428 struct vnodeop_desc
*a_desc
;
6431 vfs_context_t a_context
;
6435 VNOP_KQFILT_REMOVE(struct vnode
*vp
, uintptr_t ident
, vfs_context_t ctx
)
6438 struct vnop_kqfilt_remove_args a
;
6439 #if CONFIG_VFS_FUNNEL
6441 int funnel_state
= 0;
6442 #endif /* CONFIG_VFS_FUNNEL */
6444 a
.a_desc
= VDESC(vnop_kqfilt_remove
);
6449 #if CONFIG_VFS_FUNNEL
6450 thread_safe
= THREAD_SAFE_FS(vp
);
6452 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
6456 #endif /* CONFIG_VFS_FUNNEL */
6458 _err
= (*vp
->v_op
[vnop_kqfilt_remove_desc
.vdesc_offset
])(&a
);
6460 #if CONFIG_VFS_FUNNEL
6462 unlock_fsnode(vp
, &funnel_state
);
6464 #endif /* CONFIG_VFS_FUNNEL */
6470 VNOP_MONITOR(vnode_t vp
, uint32_t events
, uint32_t flags
, void *handle
, vfs_context_t ctx
)
6473 struct vnop_monitor_args a
;
6474 #if CONFIG_VFS_FUNNEL
6476 int funnel_state
= 0;
6477 #endif /* CONFIG_VFS_FUNNEL */
6479 a
.a_desc
= VDESC(vnop_monitor
);
6481 a
.a_events
= events
;
6483 a
.a_handle
= handle
;
6486 #if CONFIG_VFS_FUNNEL
6487 thread_safe
= THREAD_SAFE_FS(vp
);
6489 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
6493 #endif /* CONFIG_VFS_FUNNEL */
6495 _err
= (*vp
->v_op
[vnop_monitor_desc
.vdesc_offset
])(&a
);
6497 #if CONFIG_VFS_FUNNEL
6499 unlock_fsnode(vp
, &funnel_state
);
6501 #endif /* CONFIG_VFS_FUNNEL */
6507 struct vnop_setlabel_args
{
6508 struct vnodeop_desc
*a_desc
;
6511 vfs_context_t a_context
;
6515 VNOP_SETLABEL(struct vnode
*vp
, struct label
*label
, vfs_context_t ctx
)
6518 struct vnop_setlabel_args a
;
6519 #if CONFIG_VFS_FUNNEL
6521 int funnel_state
= 0;
6522 #endif /* CONFIG_VFS_FUNNEL */
6524 a
.a_desc
= VDESC(vnop_setlabel
);
6529 #if CONFIG_VFS_FUNNEL
6530 thread_safe
= THREAD_SAFE_FS(vp
);
6532 if ( (_err
= lock_fsnode(vp
, &funnel_state
)) ) {
6536 #endif /* CONFIG_VFS_FUNNEL */
6538 _err
= (*vp
->v_op
[vnop_setlabel_desc
.vdesc_offset
])(&a
);
6540 #if CONFIG_VFS_FUNNEL
6542 unlock_fsnode(vp
, &funnel_state
);
6544 #endif /* CONFIG_VFS_FUNNEL */
6552 * Get a named streamed
6555 VNOP_GETNAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, enum nsoperation operation
, int flags
, vfs_context_t ctx
)
6557 struct vnop_getnamedstream_args a
;
6559 #if CONFIG_VFS_FUNNEL
6560 if (!THREAD_SAFE_FS(vp
))
6562 #endif /* CONFIG_VFS_FUNNEL */
6564 a
.a_desc
= &vnop_getnamedstream_desc
;
6568 a
.a_operation
= operation
;
6572 return (*vp
->v_op
[vnop_getnamedstream_desc
.vdesc_offset
])(&a
);
6576 * Create a named streamed
6579 VNOP_MAKENAMEDSTREAM(vnode_t vp
, vnode_t
*svpp
, const char *name
, int flags
, vfs_context_t ctx
)
6581 struct vnop_makenamedstream_args a
;
6583 #if CONFIG_VFS_FUNNEL
6584 if (!THREAD_SAFE_FS(vp
))
6586 #endif /* CONFIG_VFS_FUNNEL */
6588 a
.a_desc
= &vnop_makenamedstream_desc
;
6595 return (*vp
->v_op
[vnop_makenamedstream_desc
.vdesc_offset
])(&a
);
6600 * Remove a named streamed
6603 VNOP_REMOVENAMEDSTREAM(vnode_t vp
, vnode_t svp
, const char *name
, int flags
, vfs_context_t ctx
)
6605 struct vnop_removenamedstream_args a
;
6607 #if CONFIG_VFS_FUNNEL
6608 if (!THREAD_SAFE_FS(vp
))
6610 #endif /* CONFIG_VFS_FUNNEL */
6612 a
.a_desc
= &vnop_removenamedstream_desc
;
6619 return (*vp
->v_op
[vnop_removenamedstream_desc
.vdesc_offset
])(&a
);