/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf_internal.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/uio_internal.h>
#include <sys/domain.h>
#include <sys/syslog.h>
#include <sys/ubc_internal.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/kern_memorystatus.h>
#include <sys/lockf.h>
#include <miscfs/fifofs/fifo.h>

#include <nfs/nfs_conf.h>

#include <machine/machine_routines.h>

#include <kern/assert.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>

#include <kern/kalloc.h>        /* kalloc()/kfree() */
#include <kern/clock.h>         /* delay_for_interval() */
#include <libkern/OSAtomic.h>   /* OSAddAtomic() */

#include <console/video_console.h>

#include <libkern/OSDebug.h>

#include <vm/vm_protos.h>       /* vnode_pager_vrele() */

#include <security/mac_framework.h>

#include <vfs/vfs_disk_conditioner.h>
#include <libkern/section_keywords.h>
extern lck_grp_t *vnode_lck_grp;
extern lck_attr_t *vnode_lck_attr;

extern lck_grp_t *trigger_vnode_lck_grp;
extern lck_attr_t *trigger_vnode_lck_attr;

extern lck_mtx_t * mnt_list_mtx_lock;
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int     vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
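/*
 * Usage sketch (hypothetical helpers, not part of the original file):
 * iftovt_tab maps the S_IFMT file-type bits of an on-disk mode to a vnode
 * type, and vttoif_tab maps a vnode type back to S_IF* bits, mirroring the
 * IFTOVT()/VTTOIF() macros.
 */
static inline enum vtype
example_mode_to_vtype(mode_t mode)
{
	/* the type bits occupy the top nibble of the 16-bit mode */
	return iftovt_tab[(mode & S_IFMT) >> 12];
}

static inline mode_t
example_vtype_to_mode(enum vtype vt)
{
	return (mode_t)vttoif_tab[(int)vt];
}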
/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern void memory_object_mark_used(
	memory_object_control_t         control);

extern void memory_object_mark_unused(
	memory_object_control_t         control,
	boolean_t                       rage);

extern void memory_object_mark_io_tracking(
	memory_object_control_t         control);

/* XXX next prototype should be from <nfs/nfs.h> */
extern int       nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);

extern int paniclog_append_noflush(const char *format, ...);

/* XXX next prototype should be from <libsa/stdlib.h> but conflicts libkern */
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

__private_extern__ void vntblinit(void);
__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
    enum uio_seg, int);

extern int system_inshutdown;
static void vnode_list_add(vnode_t);
static void vnode_async_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);

static void vnode_abort_advlocks(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static void vnode_reclaim_internal(vnode_t, int, int, int);

static void vnode_dropiocount(vnode_t);

static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int  vnode_reload(vnode_t);
static int  vnode_isinuse_locked(vnode_t, int, int);

static int unmount_callback(mount_t, __unused void *);

static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int );
static void vnode_iterate_setup(mount_t);
int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
static mount_t vfs_getvfs_locked(fsid_t *);
static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
    struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);

errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);

static void record_vp(vnode_t vp, int count);

#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
extern int bootarg_no_vnode_jetsam;    /* from bsd_init.c default value is 0 */
#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */

extern int bootarg_no_vnode_drain;     /* from bsd_init.c default value is 0 */

boolean_t root_is_CF_drive = FALSE;
static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
static void vnode_resolver_detach(vnode_t);

TAILQ_HEAD(freelst, vnode) vnode_free_list;     /* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list;     /* vnode dead list */
TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;

TAILQ_HEAD(ragelst, vnode) vnode_rage_list;     /* vnode rapid age list */
struct timeval rage_tv;
int     rage_limit = 0;
int     ragevnodes = 0;

static int vfs_unmountall_started = 0;

#define RAGE_LIMIT_MIN  100
#define RAGE_TIME_LIMIT 5

/*
 * NOTE: These are shadowed from PlatformSupport definitions, but XNU
 * builds standalone.
 */
#define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data"
#define PLATFORM_VM_VOLUME_MOUNT_POINT   "/private/var/vm"

struct mntlist mountlist;                       /* mounted filesystem list */
static int nummounts = 0;

static int print_busy_vnodes = 0;               /* print out busy vnodes */
#if DIAGNOSTIC
#define VLISTCHECK(fun, vp, list)       \
	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb)      \
	        panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif  /* DIAGNOSTIC */

#define VLISTNONE(vp)   \
	do {    \
	        (vp)->v_freelist.tqe_next = (struct vnode *)0;          \
	        (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb;   \
	} while(0)

#define VONLIST(vp)     \
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp)       \
	do {    \
	        VLISTCHECK((fun), (vp), "free");        \
	        TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist);       \
	        VLISTNONE((vp));        \
	        freevnodes--;   \
	} while(0)

/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp)       \
	do {    \
	        VLISTCHECK((fun), (vp), "dead");        \
	        TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist);       \
	        VLISTNONE((vp));        \
	        vp->v_listflag &= ~VLIST_DEAD;  \
	        deadvnodes--;   \
	} while(0)

/* remove a vnode from async work vnode list */
#define VREMASYNC_WORK(fun, vp) \
	do {    \
	        VLISTCHECK((fun), (vp), "async_work");  \
	        TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
	        VLISTNONE((vp));        \
	        vp->v_listflag &= ~VLIST_ASYNC_WORK;    \
	        async_work_vnodes--;    \
	} while(0)

/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp)       \
	do {    \
	        if ( !(vp->v_listflag & VLIST_RAGE))                    \
	                panic("VREMRAGE: vp not on rage list");         \
	        VLISTCHECK((fun), (vp), "rage");        \
	        TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist);       \
	        VLISTNONE((vp));        \
	        vp->v_listflag &= ~VLIST_RAGE;  \
	        ragevnodes--;   \
	} while(0)
static void async_work_continue(void);

/*
 * Initialize the vnode management data structures.
 */
__private_extern__ void
vntblinit(void)
{
	thread_t        thread = THREAD_NULL;

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&vnode_async_work_list);
	TAILQ_INIT(&mountlist);

	microuptime(&rage_tv);
	rage_limit = desiredvnodes / 100;

	if (rage_limit < RAGE_LIMIT_MIN) {
		rage_limit = RAGE_LIMIT_MIN;
	}

	/*
	 * create worker threads
	 */
	kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
	thread_deallocate(thread);
}
/* the timeout is in 10 msecs */
int
vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg)
{
	int error = 0;
	struct timespec ts;

	KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);

	if (vp->v_numoutput > output_target) {
		slpflag |= PDROP;

		vnode_lock_spin(vp);

		while ((vp->v_numoutput > output_target) && error == 0) {
			if (output_target) {
				vp->v_flag |= VTHROTTLED;
			} else {
				vp->v_flag |= VBWAIT;
			}

			ts.tv_sec = (slptimeout / 100);
			ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000;
			error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);

			vnode_lock_spin(vp);
		}
		vnode_unlock(vp);
	}
	KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);

	return error;
}
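/*
 * Usage sketch (hypothetical caller, not part of the original file): wait
 * for every outstanding write on a vnode to drain before proceeding.  The
 * slptimeout argument is in 10 msec units, per the comment above; 0 means
 * no timeout.
 */
static int
example_wait_for_write_drain(vnode_t vp)
{
	/* output_target of 0: sleep until v_numoutput reaches zero */
	return vnode_waitforwrites(vp, 0, PCATCH, 0, "example_wait_for_write_drain");
}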
void
vnode_startwrite(vnode_t vp)
{
	OSAddAtomic(1, &vp->v_numoutput);
}

void
vnode_writedone(vnode_t vp)
{
	if (vp) {
		int need_wakeup = 0;

		OSAddAtomic(-1, &vp->v_numoutput);

		vnode_lock_spin(vp);

		if (vp->v_numoutput < 0) {
			panic("vnode_writedone: numoutput < 0");
		}

		if ((vp->v_flag & VTHROTTLED)) {
			vp->v_flag &= ~VTHROTTLED;
			need_wakeup = 1;
		}
		if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
			vp->v_flag &= ~VBWAIT;
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}
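/*
 * Usage sketch (hypothetical I/O path, not part of the original file):
 * vnode_startwrite() is called when an async write is issued against a
 * vnode and vnode_writedone() when it completes, so v_numoutput tracks the
 * writes still in flight and wakes any vnode_waitforwrites() sleepers.
 */
static void
example_issue_and_complete_write(vnode_t vp)
{
	vnode_startwrite(vp);   /* write issued: v_numoutput++ */
	/* ... hand the buffer to the driver; then, on I/O completion: ... */
	vnode_writedone(vp);    /* write finished: v_numoutput--, waiters woken */
}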
int
vnode_hasdirtyblks(vnode_t vp)
{
	struct cl_writebehind *wbp;

	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be a synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_dirtyblkhd.lh_first) {
		return 1;
	}

	if (!UBCINFOEXISTS(vp)) {
		return 0;
	}

	wbp = vp->v_ubcinfo->cl_wbehind;

	if (wbp && (wbp->cl_number || wbp->cl_scmap)) {
		return 1;
	}

	return 0;
}

int
vnode_hascleanblks(vnode_t vp)
{
	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be a synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_cleanblkhd.lh_first) {
		return 1;
	}
	return 0;
}
void
vnode_iterate_setup(mount_t mp)
{
	mp->mnt_lflag |= MNT_LITER;
}
int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;
	int ret = 0;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		if (vp->v_type == VDIR) {
			continue;
		}
		if (vp == skipvp) {
			continue;
		}
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) {
			continue;
		}
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			continue;
		}
		if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) {
			continue;
		}

		/* Look for busy vnode */
		if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
			ret = 1;
			if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
				vprint("vnode_umount_preflight - busy vnode", vp);
			} else {
				return ret;
			}
		} else if (vp->v_iocount > 0) {
			/* Busy if iocount is > 0 for more than 3 seconds */
			tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
			if (vp->v_iocount > 0) {
				ret = 1;
				if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
					vprint("vnode_umount_preflight - busy vnode", vp);
				} else {
					return ret;
				}
			}
		}
	}

	return ret;
}
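/*
 * Sketch of the busy test used above (hypothetical helper, not part of the
 * original file): a vnode only counts as busy for unmount purposes if it
 * holds usecount references beyond the kernel-internal (kusecount) ones.
 */
static inline int
example_vnode_has_user_refs(vnode_t vp)
{
	return (vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0);
}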
/*
 * This routine prepares iteration by moving all the vnodes to worker queue
 * called with mount lock held
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return 0;
	}

	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	TAILQ_INIT(&mp->mnt_vnodelist);
	if (mp->mnt_newvnodes.tqh_first != NULL) {
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	}
	TAILQ_INIT(&mp->mnt_newvnodes);

	return 1;
}
/* called with mount lock held */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Joining the workerque entities to mount vnode list */
		if (mvp) {
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		} else {
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		}
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first) {
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		} else {
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		}
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return moved;
}

void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
}
#if !CONFIG_EMBEDDED

#include <i386/panic_hooks.h>

struct vnode_iterate_panic_hook {
	panic_hook_t hook;
	mount_t mp;
	struct vnode *vp;
};

static void
vnode_iterate_panic_hook(panic_hook_t *hook_)
{
	struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
	panic_phys_range_t range;
	uint64_t phys;

	if (panic_phys_range_before(hook->mp, &phys, &range)) {
		paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->mp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
	}

	if (panic_phys_range_before(hook->vp, &phys, &range)) {
		paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->vp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
	}
	panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
}
#endif //CONFIG_EMBEDDED
int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
    void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	/*
	 * The mount iterate mutex is held for the duration of the iteration.
	 * This can be done by a state flag on the mount structure but we can
	 * run into priority inversion issues sometimes.
	 * Using a mutex allows us to benefit from the priority donation
	 * mechanisms in the kernel for locks. This mutex should never be
	 * acquired in spin mode and it should be acquired before attempting to
	 * acquire the mount lock.
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return ret;
	}

#if !CONFIG_EMBEDDED
	struct vnode_iterate_panic_hook hook;
	hook.mp = mp;
	hook.vp = NULL;
	panic_hook(&hook.hook, vnode_iterate_panic_hook);
#endif
	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
#if !CONFIG_EMBEDDED
		hook.vp = vp;
#endif
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		mount_unlock(mp);

		if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			continue;
		}
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			mount_lock(mp);
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
#if !CONFIG_EMBEDDED
	panic_unhook(&hook.hook);
#endif
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);
	return ret;
}
void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}

void
mount_iterate_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_iter_lock);
}

void
mount_iterate_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_iter_lock);
}

void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}

void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}
void
mount_ref(mount_t mp, int locked)
{
	if (!locked) {
		mount_lock_spin(mp);
	}

	mp->mnt_count++;

	if (!locked) {
		mount_unlock(mp);
	}
}

void
mount_drop(mount_t mp, int locked)
{
	if (!locked) {
		mount_lock_spin(mp);
	}

	mp->mnt_count--;

	if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) {
		wakeup(&mp->mnt_lflag);
	}

	if (!locked) {
		mount_unlock(mp);
	}
}

int
mount_iterref(mount_t mp, int locked)
{
	int retval = 0;

	if (!locked) {
		mount_list_lock();
	}
	if (mp->mnt_iterref < 0) {
		retval = 1;
	} else {
		mp->mnt_iterref++;
	}
	if (!locked) {
		mount_list_unlock();
	}
	return retval;
}

int
mount_isdrained(mount_t mp, int locked)
{
	int retval;

	if (!locked) {
		mount_list_lock();
	}
	if (mp->mnt_iterref < 0) {
		retval = 1;
	} else {
		retval = 0;
	}
	if (!locked) {
		mount_list_unlock();
	}
	return retval;
}

void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}

void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref) {
		msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	}
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}

void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1) {
		mp->mnt_iterref = 0;
	}
	mount_list_unlock();
}

/* always called with mount lock held */
int
mount_refdrain(mount_t mp)
{
	if (mp->mnt_lflag & MNT_LDRAIN) {
		panic("already in drain");
	}
	mp->mnt_lflag |= MNT_LDRAIN;

	while (mp->mnt_count) {
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
	}

	if (mp->mnt_vnodelist.tqh_first != NULL) {
		panic("mount_refdrain: dangling vnode");
	}

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return 0;
}

/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
	mount_unlock(mp);
}
/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 */
int
vfs_busy(mount_t mp, int flags)
{
restart:
	if (mp->mnt_lflag & MNT_LDEAD) {
		return ENOENT;
	}

	mount_lock(mp);

	if (mp->mnt_lflag & MNT_LUNMOUNT) {
		if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
			mount_unlock(mp);
			return ENOENT;
		}

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		mp->mnt_lflag |= MNT_LWAIT;
		msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
		return ENOENT;
	}

	mount_unlock(mp);

	lck_rw_lock_shared(&mp->mnt_rwlock);

	/*
	 * Until we are granted the rwlock, it's possible for the mount point to
	 * change state, so re-evaluate before granting the vfs_busy.
	 */
	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
		lck_rw_done(&mp->mnt_rwlock);
		goto restart;
	}
	return 0;
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mount_t mp)
{
	lck_rw_done(&mp->mnt_rwlock);
}
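/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * vfs_busy()/vfs_unbusy() bracket work that must not race with an unmount
 * of the mount point.
 */
static int
example_with_busy_mount(mount_t mp)
{
	int error;

	if ((error = vfs_busy(mp, LK_NOWAIT)) != 0) {
		return error;   /* mount is dead or being unmounted */
	}
	/* ... the mount cannot be unmounted out from under us here ... */
	vfs_unbusy(mp);
	return 0;
}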
static void
vfs_rootmountfailed(mount_t mp)
{
	mount_list_lock();
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	vfs_unbusy(mp);

	mount_lock_destroy(mp);

	mac_mount_label_destroy(mp);

	FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
}
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
	mount_t mp;

	mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, sizeof(struct mount));

	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
	mp->mnt_devblocksize = DEV_BSIZE;
	mp->mnt_alignmentmask = PAGE_MASK;
	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
	mp->mnt_ioscale = 1;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
	mp->mnt_devbsdunit = 0;

	mount_lock_init(mp);
	(void)vfs_busy(mp, LK_NOWAIT);

	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	mp->mnt_vtable = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;
	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

	mount_list_lock();
	vfsp->vfc_refcount++;
	mount_list_unlock();

	strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	mp->mnt_vfsstat.f_mntonname[0] = '/';
	/* XXX const poisoning layering violation */
	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

	mac_mount_label_init(mp);
	mac_mount_label_associate(vfs_context_kernel(), mp);

	return mp;
}
errno_t
vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
{
	struct vfstable *vfsp;

	mount_list_lock();
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strncmp(vfsp->vfc_name, fstypename,
		    sizeof(vfsp->vfc_name))) {
			break;
		}
	}
	mount_list_unlock();
	if (vfsp == NULL) {
		return ENODEV;
	}

	*mpp = vfs_rootmountalloc_internal(vfsp, devname);

	if (*mpp) {
		return 0;
	}

	return ENOMEM;
}
#define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0))

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
extern int (*mountroot)(void);

int
vfs_mountroot(void)
{
	struct vnode *vp;
	struct vfstable *vfsp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfs_attr vfsattr;
	int     error;
	mount_t mp;
	vnode_t bdevvp_rootvp;

	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START);
	if (mountroot != NULL) {
		/*
		 * used for netboot which follows a different set of rules
		 */
		error = (*mountroot)();

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0);
		return error;
	}
	if ((error = bdevvp(rootdev, &rootvp))) {
		printf("vfs_mountroot: can't setup bdevvp\n");

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1);
		return error;
	}
	/*
	 * 4951998 - code we call in vfc_mountroot may replace rootvp
	 * so keep a local copy for some house keeping.
	 */
	bdevvp_rootvp = rootvp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL
		    && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) {
			continue;
		}

		mp = vfs_rootmountalloc_internal(vfsp, "root_device");
		mp->mnt_devvp = rootvp;

		if (vfsp->vfc_mountroot) {
			error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx);
		} else {
			error = VFS_MOUNT(mp, rootvp, 0, ctx);
		}

		if (!error) {
			if (bdevvp_rootvp != rootvp) {
				/*
				 * rootvp changed...
				 *   bump the iocount and fix up mnt_devvp for the
				 *   new rootvp (it will already have a usecount taken)...
				 *   drop the iocount and the usecount on the orignal
				 *   since we are no longer going to use it...
				 */
				vnode_getwithref(rootvp);
				mp->mnt_devvp = rootvp;

				vnode_rele(bdevvp_rootvp);
				vnode_put(bdevvp_rootvp);
			}
			mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

			vfs_unbusy(mp);

			mount_list_add(mp);

			/*
			 *   cache the IO attributes for the underlying physical media...
			 *   an error return indicates the underlying driver doesn't
			 *   support all the queries necessary... however, reasonable
			 *   defaults will have been set, so no reason to bail or care
			 */
			vfs_init_io_attributes(rootvp, mp);

			if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) {
				root_is_CF_drive = TRUE;
			}

			/*
			 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
			 */
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
				mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
			}
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
				mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
			}

#if !CONFIG_EMBEDDED
			uint32_t speed;

			if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) {
				speed = 128;
			} else if (disk_conditioner_mount_is_ssd(mp)) {
				speed = 7 * 256;
			} else {
				speed = 256;
			}
			vc_progress_setdiskspeed(speed);
#endif
			/*
			 * Probe root file system for additional features.
			 */
			(void)VFS_START(mp, 0, ctx);

			VFSATTR_INIT(&vfsattr);
			VFSATTR_WANTED(&vfsattr, f_capabilities);
			if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
			    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
					mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
				}

				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
					mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
				}

				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
					mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
				}

				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
					mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
				}
			}

			/*
			 * get rid of iocount reference returned
			 * by bdevvp (or picked up by us on the substitued
			 * rootvp)... it (or we) will have also taken
			 * a usecount reference which we want to keep
			 */
			vnode_put(rootvp);

			if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) {
				KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2);
				return 0;
			}

			error = VFS_ROOT(mp, &vp, ctx);
			if (error) {
				printf("%s() VFS_ROOT() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
			error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
			/*
			 * get rid of reference provided by VFS_ROOT
			 */
			vnode_put(vp);

			if (error) {
				printf("%s() vnode_label() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}

			KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3);
			return 0;
		}
fail:
		vfs_rootmountfailed(mp);

		if (error != EINVAL) {
			printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
		}
	}
	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4);
	return ENODEV;
}
/*
 * Mount the data volume of an ROSV volume group
 */
int
vfs_mount_rosv_data(void)
{
#if CONFIG_ROSV_STARTUP
	int error = 0;
	int do_rosv_mounts = 0;

	error = vnode_get(rootvnode);
	if (error) {
		/* root must be mounted first */
		printf("vnode_get(rootvnode) failed with error %d\n", error);
		return error;
	}

	printf("NOTE: Attempting ROSV mount\n");
	struct vfs_attr vfsattr;
	VFSATTR_INIT(&vfsattr);
	VFSATTR_WANTED(&vfsattr, f_capabilities);
	if (vfs_getattr(rootvnode->v_mount, &vfsattr, vfs_context_kernel()) == 0 &&
	    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
		if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_VOL_GROUPS) &&
		    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_VOL_GROUPS)) {
			printf("NOTE: DETECTED ROSV CONFIG\n");
			do_rosv_mounts = 1;
		}
	}

	if (!do_rosv_mounts) {
		vnode_put(rootvnode);
		//bail out if config not supported
		return 0;
	}

	char datapath[] = PLATFORM_DATA_VOLUME_MOUNT_POINT; /* !const because of internal casting */

	/* Mount the data volume */
	printf("attempting kernel mount for data volume... \n");
	error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
	    datapath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_DATAVOL), vfs_context_kernel());

	if (error) {
		printf("Failed to mount data volume (%d)\n", error);
	}

	vnode_put(rootvnode);

	return error;
#else
	return 0;
#endif
}
/*
 * Mount the VM volume of a container
 */
int
vfs_mount_vm(void)
{
	int error = 0;

	error = vnode_get(rootvnode);
	if (error) {
		/* root must be mounted first */
		printf("vnode_get(rootvnode) failed with error %d\n", error);
		return error;
	}

	char vmpath[] = PLATFORM_VM_VOLUME_MOUNT_POINT; /* !const because of internal casting */

	/* Mount the VM volume */
	printf("attempting kernel mount for vm volume... \n");
	error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
	    vmpath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_VMVOL), vfs_context_kernel());

	if (error) {
		printf("Failed to mount vm volume (%d)\n", error);
	} else {
		printf("mounted VM volume\n");
	}

	vnode_put(rootvnode);

	return error;
}
/*
 * Lookup a mount point by filesystem identifier.
 */
mount_t
vfs_getvfs(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 0, 0);
}

static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 1, 0);
}

struct mount *
vfs_getvfs_by_mntonname(char *path)
{
	mount_t retmp = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
		    sizeof(mp->mnt_vfsstat.f_mntonname))) {
			retmp = mp;
			if (mount_iterref(retmp, 1)) {
				retmp = NULL;
			}
			break;
		}
	}
	mount_list_unlock();
	return retmp;
}
/* generation number for creation of new fsids */
u_short mntid_gen = 0;

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;

	mount_list_lock();

	/* generate a new fsid */
	mtype = mp->mnt_vtable->vfc_typenum;
	if (++mntid_gen == 0) {
		mntid_gen++;
	}
	tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	tfsid.val[1] = mtype;

	while (vfs_getvfs_locked(&tfsid)) {
		if (++mntid_gen == 0) {
			mntid_gen++;
		}
		tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	}

	mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
	mount_list_unlock();
}
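/*
 * Sketch (hypothetical helper, not part of the original file): how an fsid
 * generated above decomposes back into its filesystem type number and the
 * mntid_gen counter in effect at mount time.
 */
static inline void
example_decompose_fsid(fsid_t fsid, int *mtype, u_short *gen)
{
	*mtype = fsid.val[1];                   /* vfc_typenum of the filesystem */
	*gen = (u_short)minor(fsid.val[0]);     /* mntid_gen value used for this mount */
}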
/*
 * Routines having to do with the management of the vnode table.
 */
extern int(**dead_vnodeop_p)(void *);
long numvnodes, freevnodes, deadvnodes, async_work_vnodes;

int async_work_timed_out = 0;
int async_work_handled = 0;
int dead_vnode_wanted = 0;
int dead_vnode_waited = 0;
/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vnode_t vp, mount_t mp)
{
	mount_t lmp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
		if ((vp->v_lflag & VNAMED_MOUNT) == 0) {
			panic("insmntque: vp not in mount vnode list");
		}
		vp->v_lflag &= ~VNAMED_MOUNT;

		mount_lock_spin(lmp);

		mount_drop(lmp, 1);

		if (vp->v_mntvnodes.tqe_next == NULL) {
			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
			}
		} else {
			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
		}
		vp->v_mntvnodes.tqe_next = NULL;
		vp->v_mntvnodes.tqe_prev = NULL;
		mount_unlock(lmp);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		mount_lock_spin(mp);
		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) {
			panic("vp already in mount list");
		}
		if (mp->mnt_lflag & MNT_LITER) {
			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
		}
		if (vp->v_lflag & VNAMED_MOUNT) {
			panic("insmntque: vp already in mount vnode list");
		}
		vp->v_lflag |= VNAMED_MOUNT;
		mount_ref(mp, 1);
		mount_unlock(mp);
	}
}
/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, vnode_t *vpp)
{
	vnode_t nvp;
	int     error;
	struct vnode_fsparam vfsp;
	struct vfs_context context;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return ENODEV;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	vfsp.vnfs_mp = (struct mount *)0;
	vfsp.vnfs_vtype = VBLK;
	vfsp.vnfs_str = "bdevvp";
	vfsp.vnfs_dvp = NULL;
	vfsp.vnfs_fsnode = NULL;
	vfsp.vnfs_cnp = NULL;
	vfsp.vnfs_vops = spec_vnodeop_p;
	vfsp.vnfs_rdev = dev;
	vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;

	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = 0;

	if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
		*vpp = NULLVP;
		return error;
	}
	vnode_lock_spin(nvp);
	nvp->v_flag |= VBDEVVP;
	nvp->v_tag = VT_NON;    /* set this to VT_NON so during aliasing it can be replaced */
	vnode_unlock(nvp);

	if ((error = vnode_ref(nvp))) {
		panic("bdevvp failed: vnode_ref");
		return error;
	}
	if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
		panic("bdevvp failed: fsync");
		return error;
	}
	if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
		panic("bdevvp failed: invalidateblks");
		return error;
	}

	/*
	 * XXXMAC: We can't put a MAC check here, the system will
	 * panic without this vnode.
	 */

	if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
		panic("bdevvp failed: open");
		return error;
	}
	*vpp = nvp;

	return 0;
}
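/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * bdevvp() hands back a referenced, opened block-device vnode for a dev_t,
 * the way vfs_mountroot() obtains rootvp for rootdev above.
 */
static int
example_get_block_device_vnode(dev_t dev, vnode_t *vpp)
{
	int error;

	if ((error = bdevvp(dev, vpp)) != 0) {
		return error;   /* no vnode could be created for this device */
	}
	/* ... use *vpp; it carries the usecount taken by vnode_ref() in bdevvp() ... */
	return 0;
}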
/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
static vnode_t
checkalias(struct vnode *nvp, dev_t nvp_rdev)
{
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo *sin = NULL;
	int vid = 0;

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	SPECHASH_LOCK();

	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
			vid = vp->v_id;
			break;
		}
	}
	SPECHASH_UNLOCK();

	if (vp) {
found_alias:
		if (vnode_getwithvid(vp, vid)) {
			goto loop;
		}
		/*
		 * Termination state is checked in vnode_getwithvid
		 */
		vnode_lock(vp);

		/*
		 * Alias, but not in use, so flush it out.
		 */
		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_put_locked(vp);
			vnode_unlock(vp);
			goto loop;
		}
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		if (sin == NULL) {
			MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
			    M_SPECINFO, M_WAITOK);
		}

		nvp->v_specinfo = sin;
		bzero(nvp->v_specinfo, sizeof(struct specinfo));
		nvp->v_rdev = nvp_rdev;
		nvp->v_specflags = 0;
		nvp->v_speclastr = -1;
		nvp->v_specinfo->si_opencount = 0;
		nvp->v_specinfo->si_initted = 0;
		nvp->v_specinfo->si_throttleable = 0;

		SPECHASH_LOCK();

		/* We dropped the lock, someone could have added */
		if (vp == NULLVP) {
			for (vp = *vpp; vp; vp = vp->v_specnext) {
				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
					vid = vp->v_id;
					SPECHASH_UNLOCK();
					goto found_alias;
				}
			}
		}

		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;

		if (vp != NULLVP) {
			nvp->v_specflags |= SI_ALIASED;
			vp->v_specflags |= SI_ALIASED;
			SPECHASH_UNLOCK();
			vnode_put_locked(vp);
			vnode_unlock(vp);
		} else {
			SPECHASH_UNLOCK();
		}

		return NULLVP;
	}

	if (sin) {
		FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
	}

	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
		return vp;
	}

	panic("checkalias with VT_NON vp that shouldn't: %p", vp);

	return vp;
}
/*
 * Get a reference on a particular vnode and lock it if requested.
 * If the vnode was on the inactive list, remove it from the list.
 * If the vnode was on the free list, remove it from the list and
 * move it to inactive list as needed.
 * The vnode lock bit is set if the vnode is being eliminated in
 * vgone. The process is awakened when the transition is completed,
 * and an error returned to indicate that the vnode is no longer
 * usable (possibly having been changed to a new file system type).
 */
int
vget_internal(vnode_t vp, int vid, int vflags)
{
	int error = 0;

	vnode_lock_spin(vp);

	if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) {
		/*
		 * vnode to be returned only if it has writers opened
		 */
		error = ENOENT;
	} else {
		error = vnode_getiocount(vp, vid, vflags);
	}

	vnode_unlock(vp);

	return error;
}
/*
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref(vnode_t vp)
{
	return vnode_ref_ext(vp, 0, 0);
}

/*
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref_ext(vnode_t vp, int fmode, int flags)
{
	int error = 0;

	vnode_lock_spin(vp);

	/*
	 * once all the current call sites have been fixed to ensure they have
	 * taken an iocount, we can toughen this assert up and insist that the
	 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
	 */
	if (vp->v_iocount <= 0 && vp->v_usecount <= 0) {
		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
	}

	/*
	 * if you are the owner of drain/termination, can acquire usecount
	 */
	if ((flags & VNODE_REF_FORCE) == 0) {
		if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
			if (vp->v_owner != current_thread()) {
				error = ENOENT;
				goto out;
			}
		}
	}
	vp->v_usecount++;

	if (fmode & FWRITE) {
		if (++vp->v_writecount <= 0) {
			panic("vnode_ref_ext: v_writecount");
		}
	}
	if (fmode & O_EVTONLY) {
		if (++vp->v_kusecount <= 0) {
			panic("vnode_ref_ext: v_kusecount");
		}
	}
	if (vp->v_flag & VRAGE) {
		struct uthread *ut;

		ut = get_bsdthread_info(current_thread());

		if (!(current_proc()->p_lflag & P_LRAGE_VNODES) &&
		    !(ut->uu_flag & UT_RAGE_VNODES)) {
			/*
			 * a 'normal' process accessed this vnode
			 * so make sure its no longer marked
			 * for rapid aging... also, make sure
			 * it gets removed from the rage list...
			 * when v_usecount drops back to 0, it
			 * will be put back on the real free list
			 */
			vp->v_flag &= ~VRAGE;
			vp->v_references = 0;
			vnode_list_remove(vp);
		}
	}
	if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_used(vp->v_ubcinfo->ui_control);
		}
	}
out:
	vnode_unlock(vp);

	return error;
}
boolean_t
vnode_on_reliable_media(vnode_t vp)
{
	if (!(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && (vp->v_mount->mnt_flag & MNT_LOCAL)) {
		return TRUE;
	}
	return FALSE;
}

static void
vnode_async_list_add(vnode_t vp)
{
	vnode_list_lock();

	if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		panic("vnode_async_list_add: %p is in wrong state", vp);
	}

	TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
	vp->v_listflag |= VLIST_ASYNC_WORK;

	async_work_vnodes++;

	vnode_list_unlock();

	wakeup(&vnode_async_work_list);
}
/*
 * put the vnode on appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_add(vnode_t vp)
{
	boolean_t need_dead_wakeup = FALSE;

	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

again:

	/*
	 * if it is already on a list or non zero references  return
	 */
	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) {
		return;
	}

	/*
	 * In vclean, we might have deferred ditching locked buffers
	 * because something was still referencing them (indicated by
	 * usecount).  We can ditch them now.
	 */
	if (ISSET(vp->v_lflag, VL_DEAD)
	    && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
		++vp->v_iocount;        // Probably not necessary, but harmless
		vnode_unlock(vp);
		buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
		vnode_lock(vp);
		vnode_dropiocount(vp);
		goto again;
	}

	vnode_list_lock();

	if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
		/*
		 * add the new guy to the appropriate end of the RAGE list
		 */
		if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
		}

		vp->v_listflag |= VLIST_RAGE;
		ragevnodes++;

		/*
		 * reset the timestamp for the last inserted vp on the RAGE
		 * queue to let new_vnode know that its not ok to start stealing
		 * from this list... as long as we're actively adding to this list
		 * we'll push out the vnodes we want to donate to the real free list
		 * once we stop pushing, we'll let some time elapse before we start
		 * stealing them in the new_vnode routine
		 */
		microuptime(&rage_tv);
	} else {
		/*
		 * if VL_DEAD, insert it at head of the dead list
		 * else insert at tail of LRU list or at head if VAGE is set
		 */
		if ((vp->v_lflag & VL_DEAD)) {
			TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
			vp->v_listflag |= VLIST_DEAD;
			deadvnodes++;

			if (dead_vnode_wanted) {
				dead_vnode_wanted--;
				need_dead_wakeup = TRUE;
			}
		} else if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
			vp->v_flag &= ~VAGE;
			freevnodes++;
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			freevnodes++;
		}
	}
	vnode_list_unlock();

	if (need_dead_wakeup == TRUE) {
		wakeup_one((caddr_t)&dead_vnode_wanted);
	}
}
/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED and
 * the list lock held
 */
static void
vnode_list_remove_locked(vnode_t vp)
{
	if (VONLIST(vp)) {
		/*
		 * the v_listflag field is
		 * protected by the vnode_list_lock
		 */
		if (vp->v_listflag & VLIST_RAGE) {
			VREMRAGE("vnode_list_remove", vp);
		} else if (vp->v_listflag & VLIST_DEAD) {
			VREMDEAD("vnode_list_remove", vp);
		} else if (vp->v_listflag & VLIST_ASYNC_WORK) {
			VREMASYNC_WORK("vnode_list_remove", vp);
		} else {
			VREMFREE("vnode_list_remove", vp);
		}
	}
}

/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_remove(vnode_t vp)
{
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

	/*
	 * we want to avoid taking the list lock
	 * in the case where we're not on the free
	 * list... this will be true for most
	 * directories and any currently in use files
	 *
	 * we're guaranteed that we can't go from
	 * the not-on-list state to the on-list
	 * state since we hold the vnode lock...
	 * all calls to vnode_list_add are done
	 * under the vnode lock... so we can
	 * check for that condition (the prevalent one)
	 * without taking the list lock
	 */
	if (VONLIST(vp)) {
		vnode_list_lock();
		/*
		 * however, we're not guaranteed that
		 * we won't go from the on-list state
		 * to the not-on-list state until we
		 * hold the vnode_list_lock... this
		 * is due to "new_vnode" removing vnodes
		 * from the free list under the list_lock
		 * w/o the vnode lock... so we need to
		 * check again whether we're currently
		 * on the free list
		 */
		vnode_list_remove_locked(vp);

		vnode_list_unlock();
	}
}
void
vnode_rele(vnode_t vp)
{
	vnode_rele_internal(vp, 0, 0, 0);
}

void
vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
{
	vnode_rele_internal(vp, fmode, dont_reenter, 0);
}

void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
	if (!locked) {
		vnode_lock_spin(vp);
	} else {
		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
	}

	if (--vp->v_usecount < 0) {
		panic("vnode_rele_ext: vp %p usecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if (fmode & FWRITE) {
		if (--vp->v_writecount < 0) {
			panic("vnode_rele_ext: vp %p writecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (fmode & O_EVTONLY) {
		if (--vp->v_kusecount < 0) {
			panic("vnode_rele_ext: vp %p kusecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (vp->v_kusecount > vp->v_usecount) {
		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d).  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
		/*
		 * vnode is still busy... if we're the last
		 * usecount, mark for a future call to VNOP_INACTIVE
		 * when the iocount finally drops to 0
		 */
		if (vp->v_usecount == 0) {
			vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
		}
		goto done;
	}
	vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);

	if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
		/*
		 * vnode is being cleaned, or
		 * we've requested that we don't reenter
		 * the filesystem on this release...in
		 * the latter case, we'll mark the vnode aged
		 */
		if (dont_reenter) {
			if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) {
				vp->v_lflag |= VL_NEEDINACTIVE;

				if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
					vnode_async_list_add(vp);
					goto done;
				}
			}
			vp->v_flag |= VAGE;
		}
		vnode_list_add(vp);

		goto done;
	}
	/*
	 * at this point both the iocount and usecount
	 * are zero...
	 * pick up an iocount so that we can call
	 * VNOP_INACTIVE with the vnode lock unheld
	 */
	vp->v_iocount++;
	vp->v_lflag &= ~VL_NEEDINACTIVE;
	vnode_unlock(vp);

	VNOP_INACTIVE(vp, vfs_context_current());

	vnode_lock_spin(vp);
	/*
	 * because we dropped the vnode lock to call VNOP_INACTIVE
	 * the state of the vnode may have changed... we may have
	 * picked up an iocount, usecount or the MARKTERM may have
	 * been set... we need to reevaluate the reference counts
	 * to determine if we can call vnode_reclaim_internal at
	 * this point... if the reference counts are up, we'll pick
	 * up the MARKTERM state when they get subsequently dropped
	 */
	if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
		struct uthread *ut;

		ut = get_bsdthread_info(current_thread());

		if (ut->uu_defer_reclaims) {
			vp->v_defer_reclaimlist = ut->uu_vreclaims;
			ut->uu_vreclaims = vp;
			goto done;
		}
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);
done:
	if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
		}
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vnode *vp;
	int busy = 0;
	int reclaimed = 0;
	int retval;
	unsigned int vid;
	bool first_try = true;

	/*
	 * See comments in vnode_iterate() for the rationale for this lock
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);
	vnode_iterate_setup(mp);
	/*
	 * On regular unmounts(not forced) do a
	 * quick check for vnodes to be in use. This
	 * preserves the caching of vnodes. automounter
	 * tries unmounting every so often to see whether
	 * it is still busy or not.
	 */
	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
		if (vnode_umount_preflight(mp, skipvp, flags)) {
			vnode_iterate_clear(mp);
			mount_unlock(mp);
			mount_iterate_unlock(mp);
			return EBUSY;
		}
	}
loop:
	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return retval;
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ((vp->v_mount != mp) || (vp == skipvp)) {
			continue;
		}
		vid = vp->v_id;
		mount_unlock(mp);

		vnode_lock_spin(vp);

		// If vnode is already terminating, wait for it...
		while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
			vp->v_lflag |= VL_TERMWANT;
			msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
		}

		if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}

		/*
		 * If requested, skip over vnodes marked VSYSTEM.
		 * Skip over all vnodes marked VNOFLUSH.
		 */
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VROOT.
		 */
		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If the real usecount is 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (((vp->v_usecount == 0) ||
		    ((vp->v_usecount - vp->v_kusecount) == 0))) {
			vnode_lock_convert(vp);
			vp->v_iocount++;        /* so that drain waits for other iocounts */

			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_dropiocount(vp);
			vnode_put_locked(vp);
			vnode_unlock(vp);

			reclaimed++;
			mount_lock(mp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			vnode_lock_convert(vp);

			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;        /* so that drain waits for other iocounts */

				vnode_abort_advlocks(vp);
				vnode_reclaim_internal(vp, 1, 1, 0);
				vnode_dropiocount(vp);
				vnode_put_locked(vp);
				vnode_unlock(vp);
			} else {
				vclean(vp, 0);
				vp->v_lflag &= ~VL_DEAD;
				vp->v_op = spec_vnodeop_p;
				vp->v_flag |= VDEVFLUSH;
				vnode_unlock(vp);
			}
			mount_lock(mp);
			continue;
		}

		/* log vnodes blocking unforced unmounts */
		if (print_busy_vnodes && first_try && ((flags & FORCECLOSE) == 0)) {
			vprint("vflush - busy vnode", vp);
		}

		vnode_unlock(vp);
		mount_lock(mp);
		busy++;
	}

	/* At this point the worker queue is completed */
	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
		busy = 0;
		reclaimed = 0;
		(void)vnode_iterate_reloadq(mp);
		/* returned with mount lock held */
		first_try = false;
		goto loop;
	}

	/* if new vnodes were created in between retry the reclaim */
	if (vnode_iterate_reloadq(mp) != 0) {
		if (!(busy && ((flags & FORCECLOSE) == 0))) {
			first_try = false;
			goto loop;
		}
	}
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);

	if (busy && ((flags & FORCECLOSE) == 0)) {
		return EBUSY;
	}
	return 0;
}
= 0;
2351 * Disassociate the underlying file system from a vnode.
2352 * The vnode lock is held on entry.
2355 vclean(vnode_t vp
, int flags
)
2357 vfs_context_t ctx
= vfs_context_current();
2360 int already_terminating
;
2367 * Check to see if the vnode is in use.
2368 * If so we have to reference it before we clean it out
2369 * so that its count cannot fall to zero and generate a
2370 * race against ourselves to recycle it.
2372 active
= vp
->v_usecount
;
2375 * just in case we missed sending a needed
2376 * VNOP_INACTIVE, we'll do it now
2378 need_inactive
= (vp
->v_lflag
& VL_NEEDINACTIVE
);
2380 vp
->v_lflag
&= ~VL_NEEDINACTIVE
;
2383 * Prevent the vnode from being recycled or
2384 * brought into use while we clean it out.
2386 already_terminating
= (vp
->v_lflag
& VL_TERMINATE
);
2388 vp
->v_lflag
|= VL_TERMINATE
;
2391 is_namedstream
= vnode_isnamedstream(vp
);
2396 OSAddAtomicLong(1, &num_recycledvnodes
);
2398 if (flags
& DOCLOSE
) {
2399 clflags
|= IO_NDELAY
;
2401 if (flags
& REVOKEALL
) {
2402 clflags
|= IO_REVOKE
;
2405 if (active
&& (flags
& DOCLOSE
)) {
2406 VNOP_CLOSE(vp
, clflags
, ctx
);
2410 * Clean out any buffers associated with the vnode.
2412 if (flags
& DOCLOSE
) {
2413 #if CONFIG_NFS_CLIENT
2414 if (vp
->v_tag
== VT_NFS
) {
2415 nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 0);
2417 #endif /* CONFIG_NFS_CLIENT */
2419 VNOP_FSYNC(vp
, MNT_WAIT
, ctx
);
2422 * If the vnode is still in use (by the journal for
2423 * example) we don't want to invalidate locked buffers
2424 * here. In that case, either the journal will tidy them
2425 * up, or we will deal with it when the usecount is
2426 * finally released in vnode_rele_internal.
2428 buf_invalidateblks(vp
, BUF_WRITE_DATA
| (active
? 0 : BUF_INVALIDATE_LOCKED
), 0, 0);
2430 if (UBCINFOEXISTS(vp
)) {
2432 * Clean the pages in VM.
2434 (void)ubc_msync(vp
, (off_t
)0, ubc_getsize(vp
), NULL
, UBC_PUSHALL
| UBC_INVALIDATE
| UBC_SYNC
);
2437 if (active
|| need_inactive
) {
2438 VNOP_INACTIVE(vp
, ctx
);
2442 if ((is_namedstream
!= 0) && (vp
->v_parent
!= NULLVP
)) {
2443 vnode_t pvp
= vp
->v_parent
;
2445 /* Delete the shadow stream file before we reclaim its vnode */
2446 if (vnode_isshadow(vp
)) {
2447 vnode_relenamedstream(pvp
, vp
);
2451 * No more streams associated with the parent. We
2452 * have a ref on it, so its identity is stable.
2453 * If the parent is on an opaque volume, then we need to know
2454 * whether it has associated named streams.
2456 if (vfs_authopaque(pvp
->v_mount
)) {
2457 vnode_lock_spin(pvp
);
2458 pvp
->v_lflag
&= ~VL_HASSTREAMS
;
2465 * Destroy ubc named reference
2466 * cluster_release is done on this path
2467 * along with dropping the reference on the ucred
2468 * (and in the case of forced unmount of an mmap-ed file,
2469 * the ubc reference on the vnode is dropped here too).
2471 ubc_destroy_named(vp
);
2475 * cleanup trigger info from vnode (if any)
2477 if (vp
->v_resolve
) {
2478 vnode_resolver_detach(vp
);
2483 * Reclaim the vnode.
2485 if (VNOP_RECLAIM(vp
, ctx
)) {
2486 panic("vclean: cannot reclaim");
2489 // make sure the name & parent ptrs get cleaned out!
2490 vnode_update_identity(vp
, NULLVP
, NULL
, 0, 0, VNODE_UPDATE_PARENT
| VNODE_UPDATE_NAME
| VNODE_UPDATE_PURGE
| VNODE_UPDATE_PURGEFIRMLINK
);
2495 * Remove the vnode from any mount list it might be on. It is not
2496 * safe to do this any earlier because unmount needs to wait for
2497 * any vnodes to terminate and it cannot do that if it cannot find
2500 insmntque(vp
, (struct mount
*)0);
2502 vp
->v_mount
= dead_mountp
;
2503 vp
->v_op
= dead_vnodeop_p
;
2507 vp
->v_lflag
|= VL_DEAD
;
2508 vp
->v_flag
&= ~VISDIRTY
;
2510 if (already_terminating
== 0) {
2511 vp
->v_lflag
&= ~VL_TERMINATE
;
2513 * Done with purge, notify sleepers of the grim news.
2515 if (vp
->v_lflag
& VL_TERMWANT
) {
2516 vp
->v_lflag
&= ~VL_TERMWANT
;
2517 wakeup(&vp
->v_lflag
);
/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
#if DIAGNOSTIC
int
vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
#else
int
vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
#endif
{
	struct vnode *vq;
	int vid;

#if DIAGNOSTIC
	if ((flags & REVOKEALL) == 0) {
		panic("vnop_revoke");
	}
#endif

	if (vnode_isaliased(vp)) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * return an immediate error
		 */
		if (vp->v_lflag & VL_TERMINATE) {
			return ENOENT;
		}

		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		SPECHASH_LOCK();
		while ((vp->v_specflags & SI_ALIASED)) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq) {
					continue;
				}
				vid = vq->v_id;
				SPECHASH_UNLOCK();
				if (vnode_getwithvid(vq, vid)) {
					SPECHASH_LOCK();
					break;
				}
				vnode_lock(vq);
				if (!(vq->v_lflag & VL_TERMINATE)) {
					vnode_reclaim_internal(vq, 1, 1, 0);
				}
				vnode_put_locked(vq);
				vnode_unlock(vq);
				SPECHASH_LOCK();
				break;
			}
		}
		SPECHASH_UNLOCK();
	}
	vnode_lock(vp);
	if (vp->v_lflag & VL_TERMINATE) {
		vnode_unlock(vp);
		return ENOENT;
	}
	vnode_reclaim_internal(vp, 1, 0, REVOKEALL);

	return 0;
}
/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vnode_recycle(struct vnode *vp)
{
    vnode_lock_spin(vp);

    if (vp->v_iocount || vp->v_usecount) {
        vp->v_lflag |= VL_MARKTERM;
        vnode_unlock(vp);
        return 0;
    }
    vnode_lock_convert(vp);
    vnode_reclaim_internal(vp, 1, 0, 0);

    vnode_unlock(vp);

    return 1;
}

static int
vnode_reload(vnode_t vp)
{
    vnode_lock_spin(vp);

    if ((vp->v_iocount > 1) || vp->v_usecount) {
        vnode_unlock(vp);
        return 0;
    }
    if (vp->v_iocount <= 0) {
        panic("vnode_reload with no iocount %d", vp->v_iocount);
    }

    /* mark for release when iocount is dropped */
    vp->v_lflag |= VL_MARKTERM;
    vnode_unlock(vp);

    return 1;
}
static void
vgone(vnode_t vp, int flags)
{
    struct vnode *vq;
    struct vnode *vx;

    /*
     * Clean out the filesystem specific data.
     * vclean also takes care of removing the
     * vnode from any mount list it might be on
     */
    vclean(vp, flags | DOCLOSE);

    /*
     * If special device, remove it from special device alias list
     * if it is on one.
     */
    if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
        if (*vp->v_hashchain == vp) {
            *vp->v_hashchain = vp->v_specnext;
        } else {
            for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
                if (vq->v_specnext != vp) {
                    continue;
                }
                vq->v_specnext = vp->v_specnext;
                break;
            }
            if (vq == NULL) {
                panic("missing bdev");
            }
        }
        if (vp->v_specflags & SI_ALIASED) {
            vx = NULL;
            for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
                if (vq->v_rdev != vp->v_rdev ||
                    vq->v_type != vp->v_type) {
                    continue;
                }
                if (vx) {
                    break;
                }
                vx = vq;
            }
            if (vx == NULL) {
                panic("missing alias");
            }
            if (vq == NULL) {
                vx->v_specflags &= ~SI_ALIASED;
            }
            vp->v_specflags &= ~SI_ALIASED;
        }

        {
            struct specinfo *tmp = vp->v_specinfo;
            vp->v_specinfo = NULL;
            FREE_ZONE(tmp, sizeof(struct specinfo), M_SPECINFO);
        }
    }
}
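/*
 * A note on the unlinking above (illustrative summary, not from the original
 * source): the special device alias chain is a singly linked list rooted in
 * the SPECHASH bucket (*vp->v_hashchain) and threaded through v_specnext, so
 * a vnode that is not the bucket head can only be removed by scanning for its
 * predecessor; failing to find it ("missing bdev"), or failing to find any
 * remaining alias ("missing alias"), is treated as fatal corruption.
 */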
2697 * Lookup a vnode by device number.
2700 check_mountedon(dev_t dev
, enum vtype type
, int *errorp
)
2708 for (vp
= speclisth
[SPECHASH(dev
)]; vp
; vp
= vp
->v_specnext
) {
2709 if (dev
!= vp
->v_rdev
|| type
!= vp
->v_type
) {
2714 if (vnode_getwithvid(vp
, vid
)) {
2717 vnode_lock_spin(vp
);
2718 if ((vp
->v_usecount
> 0) || (vp
->v_iocount
> 1)) {
2720 if ((*errorp
= vfs_mountedon(vp
)) != 0) {
2734 * Calculate the total number of references to a special device.
2743 if (!vnode_isspec(vp
)) {
2744 return vp
->v_usecount
- vp
->v_kusecount
;
2748 if (!vnode_isaliased(vp
)) {
2749 return vp
->v_specinfo
->si_opencount
;
2755 * Grab first vnode and its vid.
2757 vq
= *vp
->v_hashchain
;
2758 vid
= vq
? vq
->v_id
: 0;
2764 * Attempt to get the vnode outside the SPECHASH lock.
2766 if (vnode_getwithvid(vq
, vid
)) {
2771 if (vq
->v_rdev
== vp
->v_rdev
&& vq
->v_type
== vp
->v_type
) {
2772 if ((vq
->v_usecount
== 0) && (vq
->v_iocount
== 1) && vq
!= vp
) {
2774 * Alias, but not in use, so flush it out.
2776 vnode_reclaim_internal(vq
, 1, 1, 0);
2777 vnode_put_locked(vq
);
2781 count
+= vq
->v_specinfo
->si_opencount
;
2787 * must do this with the reference still held on 'vq'
2788 * so that it can't be destroyed while we're poking
2789 * through v_specnext
2791 vnext
= vq
->v_specnext
;
2792 vid
= vnext
? vnext
->v_id
: 0;
2804 int prtactive
= 0; /* 1 => print out reclaim of active vnodes */
2807 * Print out a description of a vnode.
2809 static const char *typename
[] =
2810 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2813 vprint(const char *label
, struct vnode
*vp
)
2817 if (label
!= NULL
) {
2818 printf("%s: ", label
);
2820 printf("name %s type %s, usecount %d, writecount %d\n",
2821 vp
->v_name
, typename
[vp
->v_type
],
2822 vp
->v_usecount
, vp
->v_writecount
);
2824 if (vp
->v_flag
& VROOT
) {
2825 strlcat(sbuf
, "|VROOT", sizeof(sbuf
));
2827 if (vp
->v_flag
& VTEXT
) {
2828 strlcat(sbuf
, "|VTEXT", sizeof(sbuf
));
2830 if (vp
->v_flag
& VSYSTEM
) {
2831 strlcat(sbuf
, "|VSYSTEM", sizeof(sbuf
));
2833 if (vp
->v_flag
& VNOFLUSH
) {
2834 strlcat(sbuf
, "|VNOFLUSH", sizeof(sbuf
));
2836 if (vp
->v_flag
& VBWAIT
) {
2837 strlcat(sbuf
, "|VBWAIT", sizeof(sbuf
));
2839 if (vnode_isaliased(vp
)) {
2840 strlcat(sbuf
, "|VALIASED", sizeof(sbuf
));
2842 if (sbuf
[0] != '\0') {
2843 printf("vnode flags (%s\n", &sbuf
[1]);
int
vn_getpath(struct vnode *vp, char *pathbuf, int *len)
{
    return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
}

int
vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
{
    return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
}
/*
 * vn_getpath_fsenter_with_parent will reenter the file system to find the path of the
 * vnode. It requires that there are IO counts on both the vnode and the directory vnode.
 *
 * vn_getpath_fsenter is called by MAC hooks to authorize operations for everything except
 * unlink, rmdir and rename. For these operations the MAC hook calls vn_getpath. This presents
 * problems where if the path cannot be found from the name cache, those operations can
 * erroneously fail with EPERM even though the call should succeed. When removing or moving
 * file system objects with operations such as unlink or rename, those operations need to
 * take IO counts on the target and containing directory. Calling vn_getpath_fsenter from a
 * MAC hook from these operations during forced unmount operations can lead to deadlock.
 * This happens when the operation starts, IO counts are taken on the containing
 * directories and targets. Before the MAC hook is called a forced unmount from another
 * thread takes place and blocks on the ongoing operation's directory vnode in vdrain.
 * After which, the MAC hook gets called and calls vn_getpath_fsenter. vn_getpath_fsenter
 * is called with the understanding that there is an IO count on the target. If in
 * build_path the directory vnode is no longer in the cache, then the parent object id via
 * vnode_getattr from the target is obtained and used to call VFS_VGET to get the parent
 * vnode. The file system's VFS_VGET then looks up by inode in its hash and tries to get
 * an IO count. But VFS_VGET "sees" the directory vnode is in vdrain and can block
 * depending on which version and how it calls the vnode_get family of interfaces.
 *
 * N.B. A reasonable interface to use is vnode_getwithvid. This interface was modified to
 * call vnode_getiocount with VNODE_DRAINO, so it will happily get an IO count and not
 * cause issues, but there is no guarantee that all or any file systems are doing that.
 *
 * vn_getpath_fsenter_with_parent can enter the file system safely since there is a known
 * IO count on the directory vnode by calling build_path_with_parent.
 */
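/*
 * Sketch of the deadlock described above (hypothetical thread interleaving,
 * for illustration only):
 *
 *   thread A: rename()/unlink() takes iocounts on dvp and vp, then calls the
 *             MAC hook, which calls vn_getpath_fsenter(vp, ...)
 *   thread B: a forced unmount enters vdrain and blocks waiting for the
 *             iocount that thread A holds on dvp
 *   thread A: build_path misses dvp in the name cache, calls VFS_VGET for the
 *             parent, and the filesystem blocks on the draining dvp
 *
 * Passing the already-referenced directory vnode, as
 * vn_getpath_fsenter_with_parent does below, removes the VFS_VGET step and
 * with it the cycle.
 */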
int
vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len)
{
    return build_path_with_parent(vp, dvp, pathbuf, *len, len, 0, vfs_context_current());
}

int
vn_getpath_ext(struct vnode *vp, struct vnode *dvp, char *pathbuf, int *len, int flags)
{
    int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;

    if (flags && (flags != VN_GETPATH_FSENTER)) {
        if (flags & VN_GETPATH_NO_FIRMLINK) {
            bpflags |= BUILDPATH_NO_FIRMLINK;
        }
        if (flags & VN_GETPATH_VOLUME_RELATIVE) {
            bpflags |= (BUILDPATH_VOLUME_RELATIVE | BUILDPATH_NO_FIRMLINK);
        }
        if (flags & VN_GETPATH_NO_PROCROOT) {
            bpflags |= BUILDPATH_NO_PROCROOT;
        }
    }

    return build_path_with_parent(vp, dvp, pathbuf, *len, len, bpflags, vfs_context_current());
}

int
vn_getpath_no_firmlink(struct vnode *vp, char *pathbuf, int *len)
{
    return vn_getpath_ext(vp, NULLVP, pathbuf, len, VN_GETPATH_NO_FIRMLINK);
}

int
vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
{
    return ubc_cs_getcdhash(vp, offset, cdhash);
}
static char *extension_table = NULL;
static int nexts;
static int max_ext_width;

static int
extension_cmp(const void *a, const void *b)
{
    return strlen((const char *)a) - strlen((const char *)b);
}

//
// This is the api LaunchServices uses to inform the kernel
// the list of package extensions to ignore.
//
// Internally we keep the list sorted by the length of the
// extension (from longest to shortest).  We sort the
// list of extensions so that we can speed up our searches
// when comparing file names -- we only compare extensions
// that could possibly fit into the file name, not all of
// them (i.e. a short 8 character name can't have an 8
// character extension).
//
extern lck_mtx_t *pkg_extensions_lck;

__private_extern__ int
set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
{
    char *new_exts, *old_exts;
    int error;

    if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
        return EINVAL;
    }

    // allocate one byte extra so we can guarantee null termination
    MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
    if (new_exts == NULL) {
        return ENOMEM;
    }

    error = copyin(data, new_exts, nentries * maxwidth);
    if (error) {
        FREE(new_exts, M_TEMP);
        return error;
    }

    new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block

    qsort(new_exts, nentries, maxwidth, extension_cmp);

    lck_mtx_lock(pkg_extensions_lck);

    old_exts = extension_table;
    extension_table = new_exts;
    nexts = nentries;
    max_ext_width = maxwidth;

    lck_mtx_unlock(pkg_extensions_lck);

    if (old_exts) {
        FREE(old_exts, M_TEMP);
    }

    return 0;
}
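/*
 * Layout sketch (illustrative, with hypothetical values): the table copied in
 * above is a flat block of nentries fixed-width rows of maxwidth bytes each,
 * one NUL-terminated extension per row, e.g. with nentries = 3 and
 * maxwidth = 8:
 *
 *     "app\0...."  "bundle\0."  "kext\0..."
 *
 * qsort() then orders the rows by extension length (see the comment above) so
 * that name lookups only need to compare extensions that could possibly fit
 * into the candidate file name.
 */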
int
is_package_name(const char *name, int len)
{
    int i;
    size_t extlen;
    const char *ptr, *name_ext;

    name_ext = NULL;
    for (ptr = name; *ptr != '\0'; ptr++) {
        if (*ptr == '.') {
            name_ext = ptr;
        }
    }

    // if there is no "." extension, it can't match
    if (name_ext == NULL) {
        return 0;
    }

    // advance over the "."
    name_ext++;

    lck_mtx_lock(pkg_extensions_lck);

    // now iterate over all the extensions to see if any match
    ptr = &extension_table[0];
    for (i = 0; i < nexts; i++, ptr += max_ext_width) {
        extlen = strlen(ptr);
        if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
            // aha, a match!
            lck_mtx_unlock(pkg_extensions_lck);
            return 1;
        }
    }

    lck_mtx_unlock(pkg_extensions_lck);

    // if we get here, no extension matched
    return 0;
}

int
vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
{
    char *ptr, *end;
    int comp = 0;

    *component = -1;
    if (*path != '/') {
        return EINVAL;
    }

    end = path + 1;
    while (end < path + pathlen && *end != '\0') {
        while (end < path + pathlen && *end == '/' && *end != '\0') {
            end++;
        }

        ptr = end;

        while (end < path + pathlen && *end != '/' && *end != '\0') {
            end++;
        }

        if (end > path + pathlen) {
            // hmm, string wasn't null terminated
            return -1;
        }

        *end = '\0';
        if (is_package_name(ptr, end - ptr)) {
            *component = comp;
            break;
        }

        end++;
        comp++;
    }

    return 0;
}

/*
 * Determine if a name is inappropriate for a searchfs query.
 * This list consists of /System currently.
 */

int
vn_searchfs_inappropriate_name(const char *name, int len)
{
    const char *bad_names[] = { "System" };
    int bad_len[] = { 6 };
    int i;

    for (i = 0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
        if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
            return 1;
        }
    }

    // if we get here, no name matched
    return 0;
}
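/*
 * Example (hypothetical path and table contents): for
 * "/Applications/Foo.app/Contents", vn_path_package_check() walks the
 * components "Applications", "Foo.app", "Contents" and, assuming "app" is in
 * the extension table, reports the second component (index 1) as the
 * enclosing package.
 */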
3106 * Top level filesystem related information gathering.
3108 extern unsigned int vfs_nummntops
;
3111 * The VFS_NUMMNTOPS shouldn't be at name[1] since
3112 * is a VFS generic variable. Since we no longer support
3113 * VT_UFS, we reserve its value to support this sysctl node.
3115 * It should have been:
3116 * name[0]: VFS_GENERIC
3117 * name[1]: VFS_NUMMNTOPS
3119 SYSCTL_INT(_vfs
, VFS_NUMMNTOPS
, nummntops
,
3120 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
3121 &vfs_nummntops
, 0, "");
3124 vfs_sysctl(int *name __unused
, u_int namelen __unused
,
3125 user_addr_t oldp __unused
, size_t *oldlenp __unused
,
3126 user_addr_t newp __unused
, size_t newlen __unused
, proc_t p __unused
);
3129 vfs_sysctl(int *name __unused
, u_int namelen __unused
,
3130 user_addr_t oldp __unused
, size_t *oldlenp __unused
,
3131 user_addr_t newp __unused
, size_t newlen __unused
, proc_t p __unused
)
3138 // The following code disallows specific sysctl's that came through
3139 // the direct sysctl interface (vfs_sysctl_node) instead of the newer
3140 // sysctl_vfs_ctlbyfsid() interface. We can not allow these selectors
3141 // through vfs_sysctl_node() because it passes the user's oldp pointer
3142 // directly to the file system which (for these selectors) casts it
3143 // back to a struct sysctl_req and then proceed to use SYSCTL_IN()
3144 // which jumps through an arbitrary function pointer. When called
3145 // through the sysctl_vfs_ctlbyfsid() interface this does not happen
3146 // and so it's safe.
3148 // Unfortunately we have to pull in definitions from AFP and SMB and
3149 // perform explicit name checks on the file system to determine if
3150 // these selectors are being used.
3153 #define AFPFS_VFS_CTL_GETID 0x00020001
3154 #define AFPFS_VFS_CTL_NETCHANGE 0x00020002
3155 #define AFPFS_VFS_CTL_VOLCHANGE 0x00020003
3157 #define SMBFS_SYSCTL_REMOUNT 1
3158 #define SMBFS_SYSCTL_REMOUNT_INFO 2
3159 #define SMBFS_SYSCTL_GET_SERVER_SHARE 3
3163 is_bad_sysctl_name(struct vfstable
*vfsp
, int selector_name
)
3165 switch (selector_name
) {
3168 case VFS_CTL_NOLOCKS
:
3169 case VFS_CTL_NSTATUS
:
3172 case VFS_CTL_SERVERINFO
:
3179 // the more complicated check for some of SMB's special values
3180 if (strcmp(vfsp
->vfc_name
, "smbfs") == 0) {
3181 switch (selector_name
) {
3182 case SMBFS_SYSCTL_REMOUNT
:
3183 case SMBFS_SYSCTL_REMOUNT_INFO
:
3184 case SMBFS_SYSCTL_GET_SERVER_SHARE
:
3187 } else if (strcmp(vfsp
->vfc_name
, "afpfs") == 0) {
3188 switch (selector_name
) {
3189 case AFPFS_VFS_CTL_GETID
:
3190 case AFPFS_VFS_CTL_NETCHANGE
:
3191 case AFPFS_VFS_CTL_VOLCHANGE
:
3197 // If we get here we passed all the checks so the selector is ok
3203 int vfs_sysctl_node SYSCTL_HANDLER_ARGS
3206 struct vfstable
*vfsp
;
3210 fstypenum
= oidp
->oid_number
;
3214 /* all sysctl names at this level should have at least one name slot for the FS */
3216 return EISDIR
; /* overloaded */
3219 for (vfsp
= vfsconf
; vfsp
; vfsp
= vfsp
->vfc_next
) {
3220 if (vfsp
->vfc_typenum
== fstypenum
) {
3221 vfsp
->vfc_refcount
++;
3225 mount_list_unlock();
3231 if (is_bad_sysctl_name(vfsp
, name
[0])) {
3232 printf("vfs: bad selector 0x%.8x for old-style sysctl(). use the sysctl-by-fsid interface instead\n", name
[0]);
3236 error
= (vfsp
->vfc_vfsops
->vfs_sysctl
)(name
, namelen
, req
->oldptr
, &req
->oldlen
, req
->newptr
, req
->newlen
, vfs_context_current());
3239 vfsp
->vfc_refcount
--;
3240 mount_list_unlock();
3246 * Check to see if a filesystem is mounted on a block device.
3249 vfs_mountedon(struct vnode
*vp
)
3255 if (vp
->v_specflags
& SI_MOUNTEDON
) {
3259 if (vp
->v_specflags
& SI_ALIASED
) {
3260 for (vq
= *vp
->v_hashchain
; vq
; vq
= vq
->v_specnext
) {
3261 if (vq
->v_rdev
!= vp
->v_rdev
||
3262 vq
->v_type
!= vp
->v_type
) {
3265 if (vq
->v_specflags
& SI_MOUNTEDON
) {
3276 struct unmount_info
{
3277 int u_errs
; // Total failed unmounts
3278 int u_busy
; // EBUSY failed unmounts
3282 unmount_callback(mount_t mp
, void *arg
)
3286 struct unmount_info
*uip
= arg
;
3289 mount_iterdrop(mp
); // avoid vfs_iterate deadlock in dounmount()
3291 MALLOC_ZONE(mntname
, void *, MAXPATHLEN
, M_NAMEI
, M_WAITOK
);
3293 strlcpy(mntname
, mp
->mnt_vfsstat
.f_mntonname
, MAXPATHLEN
);
3296 error
= dounmount(mp
, MNT_FORCE
, 1, vfs_context_current());
3299 printf("Unmount of %s failed (%d)\n", mntname
? mntname
:"?", error
);
3300 if (error
== EBUSY
) {
3305 FREE_ZONE(mntname
, MAXPATHLEN
, M_NAMEI
);
3308 return VFS_RETURNED
;
3312 * Unmount all filesystems. The list is traversed in reverse order
3313 * of mounting to avoid dependencies.
3314 * Busy mounts are retried.
3316 __private_extern__
void
3317 vfs_unmountall(void)
3319 int mounts
, sec
= 1;
3320 struct unmount_info ui
;
3322 vfs_unmountall_started
= 1;
3325 ui
.u_errs
= ui
.u_busy
= 0;
3326 vfs_iterate(VFS_ITERATE_CB_DROPREF
| VFS_ITERATE_TAIL_FIRST
, unmount_callback
, &ui
);
3327 mounts
= mount_getvfscnt();
3332 if (ui
.u_busy
> 0) { // Busy mounts - wait & retry
3333 tsleep(&nummounts
, PVFS
, "busy mount", sec
* hz
);
3338 printf("Unmounting timed out\n");
3339 } else if (ui
.u_errs
< mounts
) {
3340 // If the vfs_iterate missed mounts in progress - wait a bit
3341 tsleep(&nummounts
, PVFS
, "missed mount", 2 * hz
);
3346 * This routine is called from vnode_pager_deallocate out of the VM
3347 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
3348 * on a vnode that has a UBCINFO
3350 __private_extern__
void
3351 vnode_pager_vrele(vnode_t vp
)
3353 struct ubc_info
*uip
;
3355 vnode_lock_spin(vp
);
3357 vp
->v_lflag
&= ~VNAMED_UBC
;
3358 if (vp
->v_usecount
!= 0) {
3360 * At the eleventh hour, just before the ubcinfo is
3361 * destroyed, ensure the ubc-specific v_usecount
3362 * reference has gone. We use v_usecount != 0 as a hint;
3363 * ubc_unmap() does nothing if there's no mapping.
3365 * This case is caused by coming here via forced unmount,
3366 * versus the usual vm_object_deallocate() path.
3367 * In the forced unmount case, ubc_destroy_named()
3368 * releases the pager before memory_object_last_unmap()
3373 vnode_lock_spin(vp
);
3376 uip
= vp
->v_ubcinfo
;
3377 vp
->v_ubcinfo
= UBC_INFO_NULL
;
3381 ubc_info_deallocate(uip
);
3385 #include <sys/disk.h>
3387 u_int32_t rootunit
= (u_int32_t
)-1;
3390 extern int lowpri_throttle_enabled
;
3391 extern int iosched_enabled
;
3395 vfs_init_io_attributes(vnode_t devvp
, mount_t mp
)
3398 off_t readblockcnt
= 0;
3399 off_t writeblockcnt
= 0;
3400 off_t readmaxcnt
= 0;
3401 off_t writemaxcnt
= 0;
3402 off_t readsegcnt
= 0;
3403 off_t writesegcnt
= 0;
3404 off_t readsegsize
= 0;
3405 off_t writesegsize
= 0;
3406 off_t alignment
= 0;
3407 u_int32_t minsaturationbytecount
= 0;
3408 u_int32_t ioqueue_depth
= 0;
3412 u_int64_t location
= 0;
    vfs_context_t ctx = vfs_context_current();
    dk_corestorage_info_t cs_info;
    boolean_t cs_present = FALSE;
3420 VNOP_IOCTL(devvp
, DKIOCGETTHROTTLEMASK
, (caddr_t
)&mp
->mnt_throttle_mask
, 0, NULL
);
3422 * as a reasonable approximation, only use the lowest bit of the mask
3423 * to generate a disk unit number
3425 mp
->mnt_devbsdunit
= num_trailing_0(mp
->mnt_throttle_mask
);
3427 if (devvp
== rootvp
) {
3428 rootunit
= mp
->mnt_devbsdunit
;
3431 if (mp
->mnt_devbsdunit
== rootunit
) {
3433 * this mount point exists on the same device as the root
3434 * partition, so it comes under the hard throttle control...
3435 * this is true even for the root mount point itself
3437 mp
->mnt_kern_flag
|= MNTK_ROOTDEV
;
3440 * force the spec device to re-cache
3441 * the underlying block size in case
3442 * the filesystem overrode the initial value
3444 set_fsblocksize(devvp
);
3447 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETBLOCKSIZE
,
3448 (caddr_t
)&blksize
, 0, ctx
))) {
3452 mp
->mnt_devblocksize
= blksize
;
3455 * set the maximum possible I/O size
3456 * this may get clipped to a smaller value
3457 * based on which constraints are being advertised
3458 * and if those advertised constraints result in a smaller
3459 * limit for a given I/O
3461 mp
->mnt_maxreadcnt
= MAX_UPL_SIZE_BYTES
;
3462 mp
->mnt_maxwritecnt
= MAX_UPL_SIZE_BYTES
;
3464 if (VNOP_IOCTL(devvp
, DKIOCISVIRTUAL
, (caddr_t
)&isvirtual
, 0, ctx
) == 0) {
3466 mp
->mnt_kern_flag
|= MNTK_VIRTUALDEV
;
3467 mp
->mnt_flag
|= MNT_REMOVABLE
;
3470 if (VNOP_IOCTL(devvp
, DKIOCISSOLIDSTATE
, (caddr_t
)&isssd
, 0, ctx
) == 0) {
3472 mp
->mnt_kern_flag
|= MNTK_SSD
;
3475 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETFEATURES
,
3476 (caddr_t
)&features
, 0, ctx
))) {
3480 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXBLOCKCOUNTREAD
,
3481 (caddr_t
)&readblockcnt
, 0, ctx
))) {
3485 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXBLOCKCOUNTWRITE
,
3486 (caddr_t
)&writeblockcnt
, 0, ctx
))) {
3490 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXBYTECOUNTREAD
,
3491 (caddr_t
)&readmaxcnt
, 0, ctx
))) {
3495 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXBYTECOUNTWRITE
,
3496 (caddr_t
)&writemaxcnt
, 0, ctx
))) {
3500 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXSEGMENTCOUNTREAD
,
3501 (caddr_t
)&readsegcnt
, 0, ctx
))) {
3505 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXSEGMENTCOUNTWRITE
,
3506 (caddr_t
)&writesegcnt
, 0, ctx
))) {
3510 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXSEGMENTBYTECOUNTREAD
,
3511 (caddr_t
)&readsegsize
, 0, ctx
))) {
3515 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMAXSEGMENTBYTECOUNTWRITE
,
3516 (caddr_t
)&writesegsize
, 0, ctx
))) {
3520 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT
,
3521 (caddr_t
)&alignment
, 0, ctx
))) {
3525 if ((error
= VNOP_IOCTL(devvp
, DKIOCGETCOMMANDPOOLSIZE
,
3526 (caddr_t
)&ioqueue_depth
, 0, ctx
))) {
3531 mp
->mnt_maxreadcnt
= (readmaxcnt
> UINT32_MAX
) ? UINT32_MAX
: readmaxcnt
;
3535 temp
= readblockcnt
* blksize
;
3536 temp
= (temp
> UINT32_MAX
) ? UINT32_MAX
: temp
;
3538 if (temp
< mp
->mnt_maxreadcnt
) {
3539 mp
->mnt_maxreadcnt
= (u_int32_t
)temp
;
3544 mp
->mnt_maxwritecnt
= (writemaxcnt
> UINT32_MAX
) ? UINT32_MAX
: writemaxcnt
;
3547 if (writeblockcnt
) {
3548 temp
= writeblockcnt
* blksize
;
3549 temp
= (temp
> UINT32_MAX
) ? UINT32_MAX
: temp
;
3551 if (temp
< mp
->mnt_maxwritecnt
) {
3552 mp
->mnt_maxwritecnt
= (u_int32_t
)temp
;
3557 temp
= (readsegcnt
> UINT16_MAX
) ? UINT16_MAX
: readsegcnt
;
3559 temp
= mp
->mnt_maxreadcnt
/ PAGE_SIZE
;
3561 if (temp
> UINT16_MAX
) {
3565 mp
->mnt_segreadcnt
= (u_int16_t
)temp
;
3568 temp
= (writesegcnt
> UINT16_MAX
) ? UINT16_MAX
: writesegcnt
;
3570 temp
= mp
->mnt_maxwritecnt
/ PAGE_SIZE
;
3572 if (temp
> UINT16_MAX
) {
3576 mp
->mnt_segwritecnt
= (u_int16_t
)temp
;
3579 temp
= (readsegsize
> UINT32_MAX
) ? UINT32_MAX
: readsegsize
;
3581 temp
= mp
->mnt_maxreadcnt
;
3583 mp
->mnt_maxsegreadsize
= (u_int32_t
)temp
;
3586 temp
= (writesegsize
> UINT32_MAX
) ? UINT32_MAX
: writesegsize
;
3588 temp
= mp
->mnt_maxwritecnt
;
3590 mp
->mnt_maxsegwritesize
= (u_int32_t
)temp
;
3593 temp
= (alignment
> PAGE_SIZE
) ? PAGE_MASK
: alignment
- 1;
3597 mp
->mnt_alignmentmask
= temp
;
3600 if (ioqueue_depth
> MNT_DEFAULT_IOQUEUE_DEPTH
) {
3601 temp
= ioqueue_depth
;
3603 temp
= MNT_DEFAULT_IOQUEUE_DEPTH
;
3606 mp
->mnt_ioqueue_depth
= temp
;
3607 mp
->mnt_ioscale
= MNT_IOSCALE(mp
->mnt_ioqueue_depth
);
3609 if (mp
->mnt_ioscale
> 1) {
3610 printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp
->mnt_ioqueue_depth
, (int)mp
->mnt_ioscale
);
3613 if (features
& DK_FEATURE_FORCE_UNIT_ACCESS
) {
3614 mp
->mnt_ioflags
|= MNT_IOFLAGS_FUA_SUPPORTED
;
3617 if (VNOP_IOCTL(devvp
, DKIOCGETIOMINSATURATIONBYTECOUNT
, (caddr_t
)&minsaturationbytecount
, 0, ctx
) == 0) {
3618 mp
->mnt_minsaturationbytecount
= minsaturationbytecount
;
3620 mp
->mnt_minsaturationbytecount
= 0;
3623 if (VNOP_IOCTL(devvp
, DKIOCCORESTORAGE
, (caddr_t
)&cs_info
, 0, ctx
) == 0) {
3627 if (features
& DK_FEATURE_UNMAP
) {
3628 mp
->mnt_ioflags
|= MNT_IOFLAGS_UNMAP_SUPPORTED
;
3630 if (cs_present
== TRUE
) {
3631 mp
->mnt_ioflags
|= MNT_IOFLAGS_CSUNMAP_SUPPORTED
;
3634 if (cs_present
== TRUE
) {
3636 * for now we'll use the following test as a proxy for
3637 * the underlying drive being FUSION in nature
3639 if ((cs_info
.flags
& DK_CORESTORAGE_PIN_YOUR_METADATA
)) {
3640 mp
->mnt_ioflags
|= MNT_IOFLAGS_FUSION_DRIVE
;
3643 /* Check for APFS Fusion */
3644 dk_apfs_flavour_t flavour
;
3645 if ((VNOP_IOCTL(devvp
, DKIOCGETAPFSFLAVOUR
, (caddr_t
)&flavour
, 0, ctx
) == 0) &&
3646 (flavour
== DK_APFS_FUSION
)) {
3647 mp
->mnt_ioflags
|= MNT_IOFLAGS_FUSION_DRIVE
;
3651 if (VNOP_IOCTL(devvp
, DKIOCGETLOCATION
, (caddr_t
)&location
, 0, ctx
) == 0) {
3652 if (location
& DK_LOCATION_EXTERNAL
) {
3653 mp
->mnt_ioflags
|= MNT_IOFLAGS_PERIPHERAL_DRIVE
;
3654 mp
->mnt_flag
|= MNT_REMOVABLE
;
3659 if (iosched_enabled
&& (features
& DK_FEATURE_PRIORITY
)) {
3660 mp
->mnt_ioflags
|= MNT_IOFLAGS_IOSCHED_SUPPORTED
;
3661 throttle_info_disable_throttle(mp
->mnt_devbsdunit
, (mp
->mnt_ioflags
& MNT_IOFLAGS_FUSION_DRIVE
) != 0);
3663 #endif /* CONFIG_IOSCHED */
3667 static struct klist fs_klist
;
3668 lck_grp_t
*fs_klist_lck_grp
;
3669 lck_mtx_t
*fs_klist_lock
;
3672 vfs_event_init(void)
3674 klist_init(&fs_klist
);
3675 fs_klist_lck_grp
= lck_grp_alloc_init("fs_klist", NULL
);
3676 fs_klist_lock
= lck_mtx_alloc_init(fs_klist_lck_grp
, NULL
);
3680 vfs_event_signal(fsid_t
*fsid
, u_int32_t event
, intptr_t data
)
3682 if (event
== VQ_DEAD
|| event
== VQ_NOTRESP
) {
3683 struct mount
*mp
= vfs_getvfs(fsid
);
3685 mount_lock_spin(mp
);
3687 mp
->mnt_kern_flag
&= ~MNT_LNOTRESP
; // Now responding
3689 mp
->mnt_kern_flag
|= MNT_LNOTRESP
; // Not responding
3695 lck_mtx_lock(fs_klist_lock
);
3696 KNOTE(&fs_klist
, event
);
3697 lck_mtx_unlock(fs_klist_lock
);
3701 * return the number of mounted filesystems.
3704 sysctl_vfs_getvfscnt(void)
3706 return mount_getvfscnt();
3711 mount_getvfscnt(void)
3717 mount_list_unlock();
3724 mount_fillfsids(fsid_t
*fsidlst
, int count
)
3731 TAILQ_FOREACH(mp
, &mountlist
, mnt_list
) {
3732 if (actual
<= count
) {
3733 fsidlst
[actual
] = mp
->mnt_vfsstat
.f_fsid
;
3737 mount_list_unlock();
3742 * fill in the array of fsid_t's up to a max of 'count', the actual
3743 * number filled in will be set in '*actual'. If there are more fsid_t's
3744 * than room in fsidlst then ENOMEM will be returned and '*actual' will
3745 * have the actual count.
3746 * having *actual filled out even in the error case is depended upon.
3749 sysctl_vfs_getvfslist(fsid_t
*fsidlst
, int count
, int *actual
)
3755 TAILQ_FOREACH(mp
, &mountlist
, mnt_list
) {
3757 if (*actual
<= count
) {
3758 fsidlst
[(*actual
) - 1] = mp
->mnt_vfsstat
.f_fsid
;
3761 mount_list_unlock();
3762 return *actual
<= count
? 0 : ENOMEM
;
3766 sysctl_vfs_vfslist(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
3767 __unused
int arg2
, struct sysctl_req
*req
)
3773 /* This is a readonly node. */
3774 if (req
->newptr
!= USER_ADDR_NULL
) {
3778 /* they are querying us so just return the space required. */
3779 if (req
->oldptr
== USER_ADDR_NULL
) {
3780 req
->oldidx
= sysctl_vfs_getvfscnt() * sizeof(fsid_t
);
3785 * Retrieve an accurate count of the amount of space required to copy
3786 * out all the fsids in the system.
3788 space
= req
->oldlen
;
3789 req
->oldlen
= sysctl_vfs_getvfscnt() * sizeof(fsid_t
);
3791 /* they didn't give us enough space. */
3792 if (space
< req
->oldlen
) {
3796 MALLOC(fsidlst
, fsid_t
*, req
->oldlen
, M_TEMP
, M_WAITOK
| M_ZERO
);
3797 if (fsidlst
== NULL
) {
3801 error
= sysctl_vfs_getvfslist(fsidlst
, req
->oldlen
/ sizeof(fsid_t
),
3804 * If we get back ENOMEM, then another mount has been added while we
3805 * slept in malloc above. If this is the case then try again.
3807 if (error
== ENOMEM
) {
3808 FREE(fsidlst
, M_TEMP
);
3809 req
->oldlen
= space
;
3813 error
= SYSCTL_OUT(req
, fsidlst
, actual
* sizeof(fsid_t
));
3815 FREE(fsidlst
, M_TEMP
);
3820 * Do a sysctl by fsid.
3823 sysctl_vfs_ctlbyfsid(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3824 struct sysctl_req
*req
)
3826 union union_vfsidctl vc
;
3828 struct vfsstatfs
*sp
;
3829 int *name
, flags
, namelen
;
3830 int error
= 0, gotref
= 0;
3831 vfs_context_t ctx
= vfs_context_current();
3832 proc_t p
= req
->p
; /* XXX req->p != current_proc()? */
3833 boolean_t is_64_bit
;
3837 is_64_bit
= proc_is64bit(p
);
3839 error
= SYSCTL_IN(req
, &vc
, is_64_bit
? sizeof(vc
.vc64
):sizeof(vc
.vc32
));
3843 if (vc
.vc32
.vc_vers
!= VFS_CTL_VERS1
) { /* works for 32 and 64 */
3847 mp
= mount_list_lookupby_fsid(&vc
.vc32
.vc_fsid
, 0, 1); /* works for 32 and 64 */
3853 /* reset so that the fs specific code can fetch it. */
3856 * Note if this is a VFS_CTL then we pass the actual sysctl req
3857 * in for "oldp" so that the lower layer can DTRT and use the
3858 * SYSCTL_IN/OUT routines.
3860 if (mp
->mnt_op
->vfs_sysctl
!= NULL
) {
3862 if (vfs_64bitready(mp
)) {
3863 error
= mp
->mnt_op
->vfs_sysctl(name
, namelen
,
3864 CAST_USER_ADDR_T(req
),
3865 NULL
, USER_ADDR_NULL
, 0,
3871 error
= mp
->mnt_op
->vfs_sysctl(name
, namelen
,
3872 CAST_USER_ADDR_T(req
),
3873 NULL
, USER_ADDR_NULL
, 0,
3876 if (error
!= ENOTSUP
) {
3881 case VFS_CTL_UMOUNT
:
3884 req
->newptr
= vc
.vc64
.vc_ptr
;
3885 req
->newlen
= (size_t)vc
.vc64
.vc_len
;
3887 req
->newptr
= CAST_USER_ADDR_T(vc
.vc32
.vc_ptr
);
3888 req
->newlen
= vc
.vc32
.vc_len
;
3890 error
= SYSCTL_IN(req
, &flags
, sizeof(flags
));
3898 /* safedounmount consumes a ref */
3899 error
= safedounmount(mp
, flags
, ctx
);
3901 case VFS_CTL_STATFS
:
3904 req
->newptr
= vc
.vc64
.vc_ptr
;
3905 req
->newlen
= (size_t)vc
.vc64
.vc_len
;
3907 req
->newptr
= CAST_USER_ADDR_T(vc
.vc32
.vc_ptr
);
3908 req
->newlen
= vc
.vc32
.vc_len
;
3910 error
= SYSCTL_IN(req
, &flags
, sizeof(flags
));
3914 sp
= &mp
->mnt_vfsstat
;
3915 if (((flags
& MNT_NOWAIT
) == 0 || (flags
& (MNT_WAIT
| MNT_DWAIT
))) &&
3916 (error
= vfs_update_vfsstat(mp
, ctx
, VFS_USER_EVENT
))) {
3920 struct user64_statfs sfs
;
3921 bzero(&sfs
, sizeof(sfs
));
3922 sfs
.f_flags
= mp
->mnt_flag
& MNT_VISFLAGMASK
;
3923 sfs
.f_type
= mp
->mnt_vtable
->vfc_typenum
;
3924 sfs
.f_bsize
= (user64_long_t
)sp
->f_bsize
;
3925 sfs
.f_iosize
= (user64_long_t
)sp
->f_iosize
;
3926 sfs
.f_blocks
= (user64_long_t
)sp
->f_blocks
;
3927 sfs
.f_bfree
= (user64_long_t
)sp
->f_bfree
;
3928 sfs
.f_bavail
= (user64_long_t
)sp
->f_bavail
;
3929 sfs
.f_files
= (user64_long_t
)sp
->f_files
;
3930 sfs
.f_ffree
= (user64_long_t
)sp
->f_ffree
;
3931 sfs
.f_fsid
= sp
->f_fsid
;
3932 sfs
.f_owner
= sp
->f_owner
;
3933 #ifdef CONFIG_NFS_CLIENT
3934 if (mp
->mnt_kern_flag
& MNTK_TYPENAME_OVERRIDE
) {
3935 strlcpy(&sfs
.f_fstypename
[0], &mp
->fstypename_override
[0], MFSNAMELEN
);
3937 #endif /* CONFIG_NFS_CLIENT */
3939 strlcpy(sfs
.f_fstypename
, sp
->f_fstypename
, MFSNAMELEN
);
3941 strlcpy(sfs
.f_mntonname
, sp
->f_mntonname
, MNAMELEN
);
3942 strlcpy(sfs
.f_mntfromname
, sp
->f_mntfromname
, MNAMELEN
);
3944 error
= SYSCTL_OUT(req
, &sfs
, sizeof(sfs
));
3946 struct user32_statfs sfs
;
3947 bzero(&sfs
, sizeof(sfs
));
3948 sfs
.f_flags
= mp
->mnt_flag
& MNT_VISFLAGMASK
;
3949 sfs
.f_type
= mp
->mnt_vtable
->vfc_typenum
;
3952 * It's possible for there to be more than 2^^31 blocks in the filesystem, so we
3953 * have to fudge the numbers here in that case. We inflate the blocksize in order
3954 * to reflect the filesystem size as best we can.
3956 if (sp
->f_blocks
> INT_MAX
) {
3960 * Work out how far we have to shift the block count down to make it fit.
3961 * Note that it's possible to have to shift so far that the resulting
3962 * blocksize would be unreportably large. At that point, we will clip
3963 * any values that don't fit.
3965 * For safety's sake, we also ensure that f_iosize is never reported as
3966 * being smaller than f_bsize.
3968 for (shift
= 0; shift
< 32; shift
++) {
3969 if ((sp
->f_blocks
>> shift
) <= INT_MAX
) {
3972 if ((((long long)sp
->f_bsize
) << (shift
+ 1)) > INT_MAX
) {
3976 #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
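            /*
             * Worked example (illustrative numbers only): a volume with
             * f_bsize = 4096 and f_blocks = 0x180000000 (about 6.4 billion
             * blocks, roughly 24 TiB) does not fit in a signed 32-bit count,
             * so the loop above settles on shift = 2 (0x180000000 >> 2 is
             * just under INT_MAX) and the fields below report f_bsize = 16384
             * with the block counts scaled down by the same factor of 4,
             * keeping f_blocks * f_bsize roughly unchanged for 32-bit
             * statfs callers.
             */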
3977 sfs
.f_blocks
= (user32_long_t
)__SHIFT_OR_CLIP(sp
->f_blocks
, shift
);
3978 sfs
.f_bfree
= (user32_long_t
)__SHIFT_OR_CLIP(sp
->f_bfree
, shift
);
3979 sfs
.f_bavail
= (user32_long_t
)__SHIFT_OR_CLIP(sp
->f_bavail
, shift
);
3980 #undef __SHIFT_OR_CLIP
3981 sfs
.f_bsize
= (user32_long_t
)(sp
->f_bsize
<< shift
);
3982 sfs
.f_iosize
= lmax(sp
->f_iosize
, sp
->f_bsize
);
3984 sfs
.f_bsize
= (user32_long_t
)sp
->f_bsize
;
3985 sfs
.f_iosize
= (user32_long_t
)sp
->f_iosize
;
3986 sfs
.f_blocks
= (user32_long_t
)sp
->f_blocks
;
3987 sfs
.f_bfree
= (user32_long_t
)sp
->f_bfree
;
3988 sfs
.f_bavail
= (user32_long_t
)sp
->f_bavail
;
3990 sfs
.f_files
= (user32_long_t
)sp
->f_files
;
3991 sfs
.f_ffree
= (user32_long_t
)sp
->f_ffree
;
3992 sfs
.f_fsid
= sp
->f_fsid
;
3993 sfs
.f_owner
= sp
->f_owner
;
3995 #ifdef CONFIG_NFS_CLIENT
3996 if (mp
->mnt_kern_flag
& MNTK_TYPENAME_OVERRIDE
) {
3997 strlcpy(&sfs
.f_fstypename
[0], &mp
->fstypename_override
[0], MFSNAMELEN
);
3999 #endif /* CONFIG_NFS_CLIENT */
4001 strlcpy(sfs
.f_fstypename
, sp
->f_fstypename
, MFSNAMELEN
);
4003 strlcpy(sfs
.f_mntonname
, sp
->f_mntonname
, MNAMELEN
);
4004 strlcpy(sfs
.f_mntfromname
, sp
->f_mntfromname
, MNAMELEN
);
4006 error
= SYSCTL_OUT(req
, &sfs
, sizeof(sfs
));
4020 static int filt_fsattach(struct knote
*kn
, struct kevent_qos_s
*kev
);
4021 static void filt_fsdetach(struct knote
*kn
);
4022 static int filt_fsevent(struct knote
*kn
, long hint
);
4023 static int filt_fstouch(struct knote
*kn
, struct kevent_qos_s
*kev
);
4024 static int filt_fsprocess(struct knote
*kn
, struct kevent_qos_s
*kev
);
4025 SECURITY_READ_ONLY_EARLY(struct filterops
) fs_filtops
= {
4026 .f_attach
= filt_fsattach
,
4027 .f_detach
= filt_fsdetach
,
4028 .f_event
= filt_fsevent
,
4029 .f_touch
= filt_fstouch
,
4030 .f_process
= filt_fsprocess
,
4034 filt_fsattach(struct knote
*kn
, __unused
struct kevent_qos_s
*kev
)
4036 kn
->kn_flags
|= EV_CLEAR
; /* automatic */
4037 kn
->kn_sdata
= 0; /* incoming data is ignored */
4039 lck_mtx_lock(fs_klist_lock
);
4040 KNOTE_ATTACH(&fs_klist
, kn
);
4041 lck_mtx_unlock(fs_klist_lock
);
4044 * filter only sees future events,
4045 * so it can't be fired already.
4051 filt_fsdetach(struct knote
*kn
)
4053 lck_mtx_lock(fs_klist_lock
);
4054 KNOTE_DETACH(&fs_klist
, kn
);
4055 lck_mtx_unlock(fs_klist_lock
);
static int
filt_fsevent(struct knote *kn, long hint)
{
    /*
     * Backwards compatibility:
     * Other filters would do nothing if kn->kn_sfflags == 0
     */
    if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
        kn->kn_fflags |= hint;
    }

    return kn->kn_fflags != 0;
}

static int
filt_fstouch(struct knote *kn, struct kevent_qos_s *kev)
{
    int res;

    lck_mtx_lock(fs_klist_lock);

    kn->kn_sfflags = kev->fflags;

    /*
     * the above filter function sets bits even if nobody is looking for them.
     * Just preserve those bits even if the new mask is more selective.
     *
     * For compatibility with previous implementations, we leave kn_fflags
     * as they were before.
     */
    //if (kn->kn_sfflags)
    //      kn->kn_fflags &= kn->kn_sfflags;
    res = (kn->kn_fflags != 0);

    lck_mtx_unlock(fs_klist_lock);

    return res;
}
4100 filt_fsprocess(struct knote
*kn
, struct kevent_qos_s
*kev
)
4104 lck_mtx_lock(fs_klist_lock
);
4105 if (kn
->kn_fflags
) {
4106 knote_fill_kevent(kn
, kev
, 0);
4109 lck_mtx_unlock(fs_klist_lock
);
4114 sysctl_vfs_noremotehang(__unused
struct sysctl_oid
*oidp
,
4115 __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
4121 /* We need a pid. */
4122 if (req
->newptr
== USER_ADDR_NULL
) {
4126 error
= SYSCTL_IN(req
, &pid
, sizeof(pid
));
4131 p
= proc_find(pid
< 0 ? -pid
: pid
);
4137 * Fetching the value is ok, but we only fetch if the old
4140 if (req
->oldptr
!= USER_ADDR_NULL
) {
4141 out
= !((p
->p_flag
& P_NOREMOTEHANG
) == 0);
4143 error
= SYSCTL_OUT(req
, &out
, sizeof(out
));
4147 /* cansignal offers us enough security. */
4148 if (p
!= req
->p
&& proc_suser(req
->p
) != 0) {
4154 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG
), &p
->p_flag
);
4156 OSBitOrAtomic(P_NOREMOTEHANG
, &p
->p_flag
);
4164 sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
4167 struct vfstable
*vfsp
;
4168 struct vfsconf vfsc
= {};
4176 } else if (namelen
> 1) {
4181 for (vfsp
= vfsconf
; vfsp
; vfsp
= vfsp
->vfc_next
) {
4182 if (vfsp
->vfc_typenum
== name
[0]) {
4188 mount_list_unlock();
4192 vfsc
.vfc_reserved1
= 0;
4193 bcopy(vfsp
->vfc_name
, vfsc
.vfc_name
, sizeof(vfsc
.vfc_name
));
4194 vfsc
.vfc_typenum
= vfsp
->vfc_typenum
;
4195 vfsc
.vfc_refcount
= vfsp
->vfc_refcount
;
4196 vfsc
.vfc_flags
= vfsp
->vfc_flags
;
4197 vfsc
.vfc_reserved2
= 0;
4198 vfsc
.vfc_reserved3
= 0;
4200 mount_list_unlock();
4201 return SYSCTL_OUT(req
, &vfsc
, sizeof(struct vfsconf
));
/* the vfs.generic. branch. */
SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
/* retrieve a list of mounted filesystem fsid_t */
SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
/* perform operations on filesystem via fsid_t */
SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
    sysctl_vfs_ctlbyfsid, "ctlbyfsid");
SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
    NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxvfstypenum, 0, "");
SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, "");
SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_vfs_generic_conf, "");
#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vfs_generic, OID_AUTO, print_busy_vnodes,
    CTLTYPE_INT | CTLFLAG_RW,
    &print_busy_vnodes, 0,
    "VFS log busy vnodes blocking unmount");
#endif /* DEVELOPMENT || DEBUG */

/* Indicate that the root file system unmounted cleanly */
static int vfs_root_unmounted_cleanly = 0;
SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &vfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");

void
vfs_set_root_unmounted_cleanly(void)
{
    vfs_root_unmounted_cleanly = 1;
}
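/*
 * Usage sketch (user space, illustrative only, not part of this file): the
 * vfsidlist node registered above can be read with the standard two-call
 * sysctl pattern, sizing the buffer first and then fetching the fsid array:
 *
 *     size_t len = 0;
 *     sysctlbyname("vfs.generic.vfsidlist", NULL, &len, NULL, 0);
 *     fsid_t *ids = malloc(len);
 *     if (ids && sysctlbyname("vfs.generic.vfsidlist", ids, &len, NULL, 0) == 0) {
 *         // len / sizeof(fsid_t) entries are now valid
 *     }
 *     free(ids);
 */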
4240 * Print vnode state.
4243 vn_print_state(struct vnode
*vp
, const char *fmt
, ...)
4246 char perm_str
[] = "(VM_KERNEL_ADDRPERM pointer)";
4247 char fs_name
[MFSNAMELEN
];
4252 printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp
), perm_str
);
4253 printf("tag %d, type %d\n", vp
->v_tag
, vp
->v_type
);
4255 printf(" iocount %d, usecount %d, kusecount %d references %d\n",
4256 vp
->v_iocount
, vp
->v_usecount
, vp
->v_kusecount
, vp
->v_references
);
4257 printf(" writecount %d, numoutput %d\n", vp
->v_writecount
,
4260 printf(" flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp
->v_flag
,
4261 vp
->v_lflag
, vp
->v_listflag
);
4263 if (vp
->v_mount
== NULL
|| vp
->v_mount
== dead_mountp
) {
4264 strlcpy(fs_name
, "deadfs", MFSNAMELEN
);
4266 vfs_name(vp
->v_mount
, fs_name
);
4269 printf(" v_data 0x%0llx %s\n",
4270 (vp
->v_data
? (uint64_t)VM_KERNEL_ADDRPERM(vp
->v_data
) : 0),
4272 printf(" v_mount 0x%0llx %s vfs_name %s\n",
4273 (vp
->v_mount
? (uint64_t)VM_KERNEL_ADDRPERM(vp
->v_mount
) : 0),
4277 long num_reusedvnodes
= 0;
4281 process_vp(vnode_t vp
, int want_vp
, int *deferred
)
4289 vnode_list_remove_locked(vp
);
4291 vnode_list_unlock();
4293 vnode_lock_spin(vp
);
4296 * We could wait for the vnode_lock after removing the vp from the freelist
4297 * and the vid is bumped only at the very end of reclaim. So it is possible
4298 * that we are looking at a vnode that is being terminated. If so skip it.
4300 if ((vpid
!= vp
->v_id
) || (vp
->v_usecount
!= 0) || (vp
->v_iocount
!= 0) ||
4301 VONLIST(vp
) || (vp
->v_lflag
& VL_TERMINATE
)) {
4303 * we lost the race between dropping the list lock
4304 * and picking up the vnode_lock... someone else
4305 * used this vnode and it is now in a new state
4311 if ((vp
->v_lflag
& (VL_NEEDINACTIVE
| VL_MARKTERM
)) == VL_NEEDINACTIVE
) {
4313 * we did a vnode_rele_ext that asked for
4314 * us not to reenter the filesystem during
4315 * the release even though VL_NEEDINACTIVE was
4316 * set... we'll do it here by doing a
4317 * vnode_get/vnode_put
4319 * pick up an iocount so that we can call
4320 * vnode_put and drive the VNOP_INACTIVE...
4321 * vnode_put will either leave us off
4322 * the freelist if a new ref comes in,
4323 * or put us back on the end of the freelist
4324 * or recycle us if we were marked for termination...
4325 * so we'll just go grab a new candidate
4331 vnode_put_locked(vp
);
4337 * Checks for anyone racing us for recycle
4339 if (vp
->v_type
!= VBAD
) {
4340 if (want_vp
&& (vnode_on_reliable_media(vp
) == FALSE
|| (vp
->v_flag
& VISDIRTY
))) {
4341 vnode_async_list_add(vp
);
4348 if (vp
->v_lflag
& VL_DEAD
) {
4349 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp
);
4352 vnode_lock_convert(vp
);
4353 (void)vnode_reclaim_internal(vp
, 1, want_vp
, 0);
4356 if ((VONLIST(vp
))) {
4357 panic("new_vnode(%p): vp on list", vp
);
4359 if (vp
->v_usecount
|| vp
->v_iocount
|| vp
->v_kusecount
||
4360 (vp
->v_lflag
& (VNAMED_UBC
| VNAMED_MOUNT
| VNAMED_FSHASH
))) {
4361 panic("new_vnode(%p): free vnode still referenced", vp
);
4363 if ((vp
->v_mntvnodes
.tqe_prev
!= 0) && (vp
->v_mntvnodes
.tqe_next
!= 0)) {
4364 panic("new_vnode(%p): vnode seems to be on mount list", vp
);
4366 if (!LIST_EMPTY(&vp
->v_nclinks
) || !TAILQ_EMPTY(&vp
->v_ncchildren
)) {
4367 panic("new_vnode(%p): vnode still hooked into the name cache", vp
);
4377 __attribute__((noreturn
))
4379 async_work_continue(void)
4381 struct async_work_lst
*q
;
4385 q
= &vnode_async_work_list
;
4390 if (TAILQ_EMPTY(q
)) {
4391 assert_wait(q
, (THREAD_UNINT
));
4393 vnode_list_unlock();
4395 thread_block((thread_continue_t
)async_work_continue
);
4399 async_work_handled
++;
4401 vp
= TAILQ_FIRST(q
);
4403 vp
= process_vp(vp
, 0, &deferred
);
4406 panic("found VBAD vp (%p) on async queue", vp
);
static int
new_vnode(vnode_t *vpp)
{
    vnode_t vp;
    uint32_t retries = 0, max_retries = 100;    /* retry in case of tablefull */
    int force_alloc = 0, walk_count = 0;
    boolean_t need_reliable_vp = FALSE;
    int deferred;
    struct timeval initial_tv;
    struct timeval current_tv;
    proc_t curproc = current_proc();

    initial_tv.tv_sec = 0;
4430 if (need_reliable_vp
== TRUE
) {
4431 async_work_timed_out
++;
4434 if ((numvnodes
- deadvnodes
) < desiredvnodes
|| force_alloc
) {
4437 if (!TAILQ_EMPTY(&vnode_dead_list
)) {
4439 * Can always reuse a dead one
4441 vp
= TAILQ_FIRST(&vnode_dead_list
);
4445 * no dead vnodes available... if we're under
4446 * the limit, we'll create a new vnode
4449 vnode_list_unlock();
4451 MALLOC_ZONE(vp
, struct vnode
*, sizeof(*vp
), M_VNODE
, M_WAITOK
);
4452 bzero((char *)vp
, sizeof(*vp
));
4453 VLISTNONE(vp
); /* avoid double queue removal */
4454 lck_mtx_init(&vp
->v_lock
, vnode_lck_grp
, vnode_lck_attr
);
4456 TAILQ_INIT(&vp
->v_ncchildren
);
4458 klist_init(&vp
->v_knotes
);
4460 vp
->v_id
= ts
.tv_nsec
;
4461 vp
->v_flag
= VSTANDARD
;
4464 if (mac_vnode_label_init_needed(vp
)) {
4465 mac_vnode_label_init(vp
);
    microuptime(&current_tv);
4474 #define MAX_WALK_COUNT 1000
4476 if (!TAILQ_EMPTY(&vnode_rage_list
) &&
4477 (ragevnodes
>= rage_limit
||
4478 (current_tv
.tv_sec
- rage_tv
.tv_sec
) >= RAGE_TIME_LIMIT
)) {
4479 TAILQ_FOREACH(vp
, &vnode_rage_list
, v_freelist
) {
4480 if (!(vp
->v_listflag
& VLIST_RAGE
)) {
4481 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp
);
4484 // if we're a dependency-capable process, skip vnodes that can
4485 // cause recycling deadlocks. (i.e. this process is diskimages
4486 // helper and the vnode is in a disk image). Querying the
4487 // mnt_kern_flag for the mount's virtual device status
4488 // is safer than checking the mnt_dependent_process, which
4489 // may not be updated if there are multiple devnode layers
4490 // in between the disk image and the final consumer.
4492 if ((curproc
->p_flag
& P_DEPENDENCY_CAPABLE
) == 0 || vp
->v_mount
== NULL
||
4493 (vp
->v_mount
->mnt_kern_flag
& MNTK_VIRTUALDEV
) == 0) {
4495 * if need_reliable_vp == TRUE, then we've already sent one or more
4496 * non-reliable vnodes to the async thread for processing and timed
4497 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
4498 * mechanism to first scan for a reliable vnode before forcing
4499 * a new vnode to be created
4501 if (need_reliable_vp
== FALSE
|| vnode_on_reliable_media(vp
) == TRUE
) {
4506 // don't iterate more than MAX_WALK_COUNT vnodes to
4507 // avoid keeping the vnode list lock held for too long.
4509 if (walk_count
++ > MAX_WALK_COUNT
) {
4516 if (vp
== NULL
&& !TAILQ_EMPTY(&vnode_free_list
)) {
4518 * Pick the first vp for possible reuse
4521 TAILQ_FOREACH(vp
, &vnode_free_list
, v_freelist
) {
4522 // if we're a dependency-capable process, skip vnodes that can
4523 // cause recycling deadlocks. (i.e. this process is diskimages
4524 // helper and the vnode is in a disk image). Querying the
4525 // mnt_kern_flag for the mount's virtual device status
4526 // is safer than checking the mnt_dependent_process, which
4527 // may not be updated if there are multiple devnode layers
4528 // in between the disk image and the final consumer.
4530 if ((curproc
->p_flag
& P_DEPENDENCY_CAPABLE
) == 0 || vp
->v_mount
== NULL
||
4531 (vp
->v_mount
->mnt_kern_flag
& MNTK_VIRTUALDEV
) == 0) {
4533 * if need_reliable_vp == TRUE, then we've already sent one or more
4534 * non-reliable vnodes to the async thread for processing and timed
4535 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
4536 * mechanism to first scan for a reliable vnode before forcing
4537 * a new vnode to be created
4539 if (need_reliable_vp
== FALSE
|| vnode_on_reliable_media(vp
) == TRUE
) {
4544 // don't iterate more than MAX_WALK_COUNT vnodes to
4545 // avoid keeping the vnode list lock held for too long.
4547 if (walk_count
++ > MAX_WALK_COUNT
) {
4555 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
4556 // then we're trying to create a vnode on behalf of a
4557 // process like diskimages-helper that has file systems
4558 // mounted on top of itself (and thus we can't reclaim
4559 // vnodes in the file systems on top of us). if we can't
4560 // find a vnode to reclaim then we'll just have to force
4563 if (vp
== NULL
&& walk_count
>= MAX_WALK_COUNT
) {
4565 vnode_list_unlock();
        /*
         * we've reached the system imposed maximum number of vnodes
         * but there isn't a single one available
         * wait a bit and then retry... if we can't get a vnode
         * after our target number of retries, then log a complaint
         */
        if (++retries <= max_retries) {
            vnode_list_unlock();
            delay_for_interval(1, 1000 * 1000);
            goto retry;
        }

        vnode_list_unlock();

        log(LOG_EMERG, "%d desired, %d numvnodes, "
            "%d free, %d dead, %d async, %d rage\n",
            desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);
4589 #if DEVELOPMENT || DEBUG
4590 if (bootarg_no_vnode_jetsam
) {
4591 panic("vnode table is full\n");
4593 #endif /* DEVELOPMENT || DEBUG */
4596 * Running out of vnodes tends to make a system unusable. Start killing
4597 * processes that jetsam knows are killable.
4599 if (memorystatus_kill_on_vnode_limit() == FALSE
) {
4601 * If jetsam can't find any more processes to kill and there
4602 * still aren't any free vnodes, panic. Hopefully we'll get a
4603 * panic log to tell us why we ran out.
4605 panic("vnode table is full\n");
4609 * Now that we've killed someone, wait a bit and continue looking
4610 * (with fewer retries before trying another kill).
4612 delay_for_interval(3, 1000 * 1000);
4622 if ((vp
= process_vp(vp
, 1, &deferred
)) == NULLVP
) {
4625 struct timeval elapsed_tv
;
4627 if (initial_tv
.tv_sec
== 0) {
4628 microuptime(&initial_tv
);
4633 dead_vnode_waited
++;
4634 dead_vnode_wanted
++;
        /*
         * note that we're only going to explicitly wait 10ms
         * for a dead vnode to become available, since even if one
         * isn't available, a reliable vnode might now be available
         * at the head of the VRAGE or free lists... if so, we
         * can satisfy the new_vnode request with less latency than waiting
         * for the full 100ms duration we're ultimately willing to tolerate
         */
        assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);
4646 vnode_list_unlock();
4648 thread_block(THREAD_CONTINUE_NULL
);
4650 microuptime(&elapsed_tv
);
4652 timevalsub(&elapsed_tv
, &initial_tv
);
4653 elapsed_msecs
= elapsed_tv
.tv_sec
* 1000 + elapsed_tv
.tv_usec
/ 1000;
4655 if (elapsed_msecs
>= 100) {
4657 * we've waited long enough... 100ms is
4658 * somewhat arbitrary for this case, but the
4659 * normal worst case latency used for UI
4660 * interaction is 100ms, so I've chosen to
4663 * setting need_reliable_vp to TRUE
4664 * forces us to find a reliable vnode
4665 * that we can process synchronously, or
4666 * to create a new one if the scan for
4667 * a reliable one hits the scan limit
4669 need_reliable_vp
= TRUE
;
4674 OSAddAtomicLong(1, &num_reusedvnodes
);
4679 * We should never see VL_LABELWAIT or VL_LABEL here.
4680 * as those operations hold a reference.
4682 assert((vp
->v_lflag
& VL_LABELWAIT
) != VL_LABELWAIT
);
4683 assert((vp
->v_lflag
& VL_LABEL
) != VL_LABEL
);
4684 if (vp
->v_lflag
& VL_LABELED
|| vp
->v_label
!= NULL
) {
4685 vnode_lock_convert(vp
);
4686 mac_vnode_label_recycle(vp
);
4687 } else if (mac_vnode_label_init_needed(vp
)) {
4688 vnode_lock_convert(vp
);
4689 mac_vnode_label_init(vp
);
4696 vp
->v_writecount
= 0;
4697 vp
->v_references
= 0;
4698 vp
->v_iterblkflags
= 0;
4699 vp
->v_flag
= VSTANDARD
;
4700 /* vbad vnodes can point to dead_mountp */
4702 vp
->v_defer_reclaimlist
= (vnode_t
)0;
4713 vnode_lock(vnode_t vp
)
4715 lck_mtx_lock(&vp
->v_lock
);
4719 vnode_lock_spin(vnode_t vp
)
4721 lck_mtx_lock_spin(&vp
->v_lock
);
4725 vnode_unlock(vnode_t vp
)
4727 lck_mtx_unlock(&vp
->v_lock
);
4733 vnode_get(struct vnode
*vp
)
4737 vnode_lock_spin(vp
);
4738 retval
= vnode_get_locked(vp
);
4745 vnode_get_locked(struct vnode
*vp
)
4748 lck_mtx_assert(&vp
->v_lock
, LCK_MTX_ASSERT_OWNED
);
4750 if ((vp
->v_iocount
== 0) && (vp
->v_lflag
& (VL_TERMINATE
| VL_DEAD
))) {
4754 if (os_add_overflow(vp
->v_iocount
, 1, &vp
->v_iocount
)) {
4755 panic("v_iocount overflow");
4765 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
4766 * while the vnode is draining, but at no point after that) to prevent
4767 * deadlocks when getting vnodes from filesystem hashes while holding
4768 * resources that may prevent other iocounts from being released.
4771 vnode_getwithvid(vnode_t vp
, uint32_t vid
)
4773 return vget_internal(vp
, vid
, (VNODE_NODEAD
| VNODE_WITHID
| VNODE_DRAINO
));
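/*
 * Typical use (illustrative): a filesystem hash lookup records vp->v_id while
 * holding its hash lock, drops the lock, and then calls
 * vnode_getwithvid(vp, vid); a non-zero return means the vnode was recycled
 * (or is dead) in the window and the lookup should be retried.
 */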
4777 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
4778 * drain; it exists for use in the VFS name cache, where we really do want to block behind
4779 * vnode drain to prevent holding off an unmount.
4782 vnode_getwithvid_drainok(vnode_t vp
, uint32_t vid
)
4784 return vget_internal(vp
, vid
, (VNODE_NODEAD
| VNODE_WITHID
));
4788 vnode_getwithref(vnode_t vp
)
4790 return vget_internal(vp
, 0, 0);
4794 __private_extern__
int
4795 vnode_getalways(vnode_t vp
)
4797 return vget_internal(vp
, 0, VNODE_ALWAYS
);
4801 vnode_put(vnode_t vp
)
4805 vnode_lock_spin(vp
);
4806 retval
= vnode_put_locked(vp
);
4813 vn_set_dead(vnode_t vp
)
4816 vp
->v_op
= dead_vnodeop_p
;
4820 vp
->v_lflag
|= VL_DEAD
;
4824 vnode_put_locked(vnode_t vp
)
4826 vfs_context_t ctx
= vfs_context_current(); /* hoist outside loop */
4829 lck_mtx_assert(&vp
->v_lock
, LCK_MTX_ASSERT_OWNED
);
4832 if (vp
->v_iocount
< 1) {
4833 panic("vnode_put(%p): iocount < 1", vp
);
4836 if ((vp
->v_usecount
> 0) || (vp
->v_iocount
> 1)) {
4837 vnode_dropiocount(vp
);
4840 if ((vp
->v_lflag
& (VL_DEAD
| VL_NEEDINACTIVE
)) == VL_NEEDINACTIVE
) {
4841 vp
->v_lflag
&= ~VL_NEEDINACTIVE
;
4844 VNOP_INACTIVE(vp
, ctx
);
4846 vnode_lock_spin(vp
);
        /*
         * because we had to drop the vnode lock before calling
         * VNOP_INACTIVE, the state of this vnode may have changed...
         * we may pick up both VL_MARKTERM and either
         * an iocount or a usecount while in the VNOP_INACTIVE call
         * we don't want to call vnode_reclaim_internal on a vnode
         * that has active references on it... so loop back around
         * and reevaluate the state
         */
        goto retry;
    }
    vp->v_lflag &= ~VL_NEEDINACTIVE;
4860 if ((vp
->v_lflag
& (VL_MARKTERM
| VL_TERMINATE
| VL_DEAD
)) == VL_MARKTERM
) {
4861 vnode_lock_convert(vp
);
4862 vnode_reclaim_internal(vp
, 1, 1, 0);
4864 vnode_dropiocount(vp
);
4870 /* is vnode_t in use by others? */
4872 vnode_isinuse(vnode_t vp
, int refcnt
)
4874 return vnode_isinuse_locked(vp
, refcnt
, 0);
4878 vnode_usecount(vnode_t vp
)
4880 return vp
->v_usecount
;
4884 vnode_iocount(vnode_t vp
)
4886 return vp
->v_iocount
;
4890 vnode_isinuse_locked(vnode_t vp
, int refcnt
, int locked
)
4895 vnode_lock_spin(vp
);
4897 if ((vp
->v_type
!= VREG
) && ((vp
->v_usecount
- vp
->v_kusecount
) > refcnt
)) {
4901 if (vp
->v_type
== VREG
) {
4902 retval
= ubc_isinuse_locked(vp
, refcnt
, 1);
4913 /* resume vnode_t */
4915 vnode_resume(vnode_t vp
)
4917 if ((vp
->v_lflag
& VL_SUSPENDED
) && vp
->v_owner
== current_thread()) {
4918 vnode_lock_spin(vp
);
4919 vp
->v_lflag
&= ~VL_SUSPENDED
;
4923 wakeup(&vp
->v_iocount
);
/*
 * Please do not use on more than one vnode at a time as it may
 * cause deadlocks.
 * xxx should we explicitly prevent this from happening?
 */
4935 vnode_suspend(vnode_t vp
)
4937 if (vp
->v_lflag
& VL_SUSPENDED
) {
4941 vnode_lock_spin(vp
);
4944 * xxx is this sufficient to check if a vnode_drain is
4948 if (vp
->v_owner
== NULL
) {
4949 vp
->v_lflag
|= VL_SUSPENDED
;
4950 vp
->v_owner
= current_thread();
4958 * Release any blocked locking requests on the vnode.
4959 * Used for forced-unmounts.
4961 * XXX What about network filesystems?
4964 vnode_abort_advlocks(vnode_t vp
)
4966 if (vp
->v_flag
& VLOCKLOCAL
) {
4967 lf_abort_advlocks(vp
);
4973 vnode_drain(vnode_t vp
)
4975 if (vp
->v_lflag
& VL_DRAIN
) {
4976 panic("vnode_drain: recursive drain");
4979 vp
->v_lflag
|= VL_DRAIN
;
4980 vp
->v_owner
= current_thread();
4982 while (vp
->v_iocount
> 1) {
4983 if (bootarg_no_vnode_drain
) {
4984 struct timespec ts
= {.tv_sec
= 10, .tv_nsec
= 0};
4987 if (vfs_unmountall_started
) {
4991 error
= msleep(&vp
->v_iocount
, &vp
->v_lock
, PVFS
, "vnode_drain_with_timeout", &ts
);
4993 /* Try to deal with leaked iocounts under bootarg and shutting down */
4994 if (vp
->v_iocount
> 1 && error
== EWOULDBLOCK
&&
4995 ts
.tv_sec
== 1 && vp
->v_numoutput
== 0) {
5000 msleep(&vp
->v_iocount
, &vp
->v_lock
, PVFS
, "vnode_drain", NULL
);
5004 vp
->v_lflag
&= ~VL_DRAIN
;
/*
 * if the number of recent references via vnode_getwithvid or vnode_getwithref
 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
 * the LRU list if it's currently on it... once the iocount and usecount both drop
 * to 0, it will get put back on the end of the list, effectively making it younger
 * this allows us to keep actively referenced vnodes in the list without having
 * to constantly remove and add to the list each time a vnode w/o a usecount is
 * referenced which costs us taking and dropping a global lock twice.
 * However, if the vnode is marked DIRTY, we want to pull it out much earlier
 */
#define UNAGE_THRESHHOLD        25
#define UNAGE_DIRTYTHRESHHOLD   6
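/*
 * Concretely (illustrative reading of the thresholds above): a vnode needs
 * UNAGE_THRESHHOLD (25) recent references before vnode_getiocount() pulls it
 * off the LRU list, while a vnode marked VISDIRTY is pulled after only
 * UNAGE_DIRTYTHRESHHOLD (6) references.
 */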
5024 vnode_getiocount(vnode_t vp
, unsigned int vid
, int vflags
)
5026 int nodead
= vflags
& VNODE_NODEAD
;
5027 int nosusp
= vflags
& VNODE_NOSUSPEND
;
5028 int always
= vflags
& VNODE_ALWAYS
;
5029 int beatdrain
= vflags
& VNODE_DRAINO
;
5030 int withvid
= vflags
& VNODE_WITHID
;
5036 * if it is a dead vnode with deadfs
5038 if (nodead
&& (vp
->v_lflag
& VL_DEAD
) && ((vp
->v_type
== VBAD
) || (vp
->v_data
== 0))) {
5042 * will return VL_DEAD ones
5044 if ((vp
->v_lflag
& (VL_SUSPENDED
| VL_DRAIN
| VL_TERMINATE
)) == 0) {
5048 * if suspended vnodes are to be failed
5050 if (nosusp
&& (vp
->v_lflag
& VL_SUSPENDED
)) {
5054 * if you are the owner of drain/suspend/termination , can acquire iocount
5055 * check for VL_TERMINATE; it does not set owner
5057 if ((vp
->v_lflag
& (VL_DRAIN
| VL_SUSPENDED
| VL_TERMINATE
)) &&
5058 (vp
->v_owner
== current_thread())) {
5067 * If this vnode is getting drained, there are some cases where
5068 * we can't block or, in case of tty vnodes, want to be
5071 if (vp
->v_lflag
& VL_DRAIN
) {
5073 * In some situations, we want to get an iocount
5074 * even if the vnode is draining to prevent deadlock,
5075 * e.g. if we're in the filesystem, potentially holding
5076 * resources that could prevent other iocounts from
            /*
             * Don't block if the vnode's mount point is unmounting as
             * we may be the thread the unmount is itself waiting on
             * Only callers who pass in vids (at this point, we've already
             * handled nosusp and nodead) are expecting error returns
             * from this function, so we can only return errors for
             * those. ENODEV is intended to inform callers that the call
             * failed because an unmount is in progress.
             */
            if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) {
                return ENODEV;
            }
5095 if (vnode_istty(vp
)) {
5100 vnode_lock_convert(vp
);
5102 if (vp
->v_lflag
& VL_TERMINATE
) {
5105 vp
->v_lflag
|= VL_TERMWANT
;
5107 error
= msleep(&vp
->v_lflag
, &vp
->v_lock
,
5108 (PVFS
| sleepflg
), "vnode getiocount", NULL
);
5113 msleep(&vp
->v_iocount
, &vp
->v_lock
, PVFS
, "vnode_getiocount", NULL
);
5116 if (withvid
&& vid
!= vp
->v_id
) {
5119 if (++vp
->v_references
>= UNAGE_THRESHHOLD
||
5120 (vp
->v_flag
& VISDIRTY
&& vp
->v_references
>= UNAGE_DIRTYTHRESHHOLD
)) {
5121 vp
->v_references
= 0;
5122 vnode_list_remove(vp
);
vnode_dropiocount(vnode_t vp)
{
	if (vp->v_iocount < 1) {
		panic("vnode_dropiocount(%p): v_iocount < 1", vp);
	}
	if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
		wakeup(&vp->v_iocount);
	}
vnode_reclaim(struct vnode * vp)
{
	vnode_reclaim_internal(vp, 0, 0, 0);
}
vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
{
	if (vp->v_lflag & VL_TERMINATE) {
		panic("vnode reclaim in progress");
	}
	vp->v_lflag |= VL_TERMINATE;

	vn_clearunionwait(vp, 1);

	if (vnode_istty(vp) && (flags & REVOKEALL) && vp->v_usecount &&
	    (vp->v_iocount > 1)) {
		VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel());
	}
	isfifo = (vp->v_type == VFIFO);

	if (vp->v_type != VBAD) {
		vgone(vp, flags);               /* clean and reclaim the vnode */
	}
	/*
	 * give the vnode a new identity so that vnode_getwithvid will fail
	 * on any stale cache accesses...
	 * grab the list_lock so that if we're in "new_vnode"
	 * behind the list_lock trying to steal this vnode, the v_id is stable...
	 * once new_vnode drops the list_lock, it will block trying to take
	 * the vnode lock until we release it... at that point it will evaluate
	 * whether the v_vid has changed
	 * also need to make sure that the vnode isn't on a list where "new_vnode"
	 * can find it after the v_id has been bumped until we are completely done
	 * with the vnode (i.e. putting it back on a list has to be the very last
	 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
	 * are holding an io_count on the vnode... they need to drop the io_count
	 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
	 * they are completely done with the vnode
	 */
	vnode_list_remove_locked(vp);
	vnode_list_unlock();

	struct fifoinfo * fip;

	fip = vp->v_fifoinfo;
	vp->v_fifoinfo = NULL;

	panic("vnode_reclaim_internal: cleaned vnode isn't");
	if (vp->v_numoutput) {
		panic("vnode_reclaim_internal: clean vnode has pending I/O's");
	}
	if (UBCINFOEXISTS(vp)) {
		panic("vnode_reclaim_internal: ubcinfo not cleaned");
	}
	panic("vnode_reclaim_internal: vparent not removed");
	panic("vnode_reclaim_internal: vname not removed");

	vp->v_socket = NULL;

	vp->v_lflag &= ~VL_TERMINATE;

	KNOTE(&vp->v_knotes, NOTE_REVOKE);

	/* Make sure that when we reuse the vnode, no knotes left over */
	klist_init(&vp->v_knotes);

	if (vp->v_lflag & VL_TERMWANT) {
		vp->v_lflag &= ~VL_TERMWANT;
		wakeup(&vp->v_lflag);
	}
	/*
	 * make sure we get on the
	 * dead list if appropriate
	 */
vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
	struct componentname *cnp;
	struct vnode_fsparam *param = (struct vnode_fsparam *)data;
	struct vnode_trigger_param *tinfo = NULL;

	/* Do quick sanity check on the parameters. */
	if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
	if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
		tinfo = (struct vnode_trigger_param *)data;

		/* Validate trigger vnode input */
		if ((param->vnfs_vtype != VDIR) ||
		    (tinfo->vnt_resolve_func == NULL) ||
		    (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
		/* Fall through a normal create (params will be the same) */
		flavor = VNCREATE_FLAVOR;
	}
	if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
	if (!existing_vnode) {
		if ((error = new_vnode(&vp))) {
		/* Make it so that it can be released by a vnode_put) */
	/*
	 * A vnode obtained by vnode_create_empty has been passed to
	 * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
	 * this point, it is set back on any error.
	 *
	 * N.B. vnode locking - We make the same assumptions as the
	 * "unsplit" vnode_create did - i.e. it is safe to update the
	 * vnode's fields without the vnode lock. This vnode has been
	 * out and about with the filesystem and hopefully nothing
	 * was done to the vnode between the vnode_create_empty and
	 * now when it has come in through vnode_initialize.
	 */
	vp->v_lflag &= ~VL_DEAD;

	dvp = param->vnfs_dvp;
	cnp = param->vnfs_cnp;

	vp->v_op = param->vnfs_vops;
	vp->v_type = param->vnfs_vtype;
	vp->v_data = param->vnfs_fsnode;

	if (param->vnfs_markroot) {
		vp->v_flag |= VROOT;
	}
	if (param->vnfs_marksystem) {
		vp->v_flag |= VSYSTEM;
	}
	if (vp->v_type == VREG) {
		error = ubc_info_init_withsize(vp, param->vnfs_filesize);
		if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) {
			memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
		}
#if CONFIG_FIRMLINKS
	vp->v_fmlink = NULLVP;
	vp->v_flag &= ~VFMLINKTARGET;

	/*
	 * For trigger vnodes, attach trigger info to vnode
	 */
	if ((vp->v_type == VDIR) && (tinfo != NULL)) {
		/*
		 * Note: has a side effect of incrementing trigger count on the
		 * mount if successful, which we would need to undo on a
		 * subsequent failure.
		 */
		error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
			printf("vnode_create: vnode_resolver_create() err %d\n", error);
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		vp->v_tag = VT_DEVFS;           /* callers will reset if needed (bdevvp) */

		if ((nvp = checkalias(vp, param->vnfs_rdev))) {
			/*
			 * if checkalias returns a vnode, it will be locked
			 *
			 * first get rid of the unneeded vnode we acquired
			 */
			vp->v_op = spec_vnodeop_p;
			vp->v_lflag = VL_DEAD;
			/*
			 * switch to aliased vnode and finish
			 */
			vp->v_op = param->vnfs_vops;
			vp->v_type = param->vnfs_vtype;
			vp->v_data = param->vnfs_fsnode;

			insmntque(vp, param->vnfs_mp);
		if (VCHR == vp->v_type) {
			u_int maj = major(vp->v_rdev);

			if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) {
				vp->v_flag |= VISTTY;
			}
		}
	}
	if (vp->v_type == VFIFO) {
		struct fifoinfo *fip;

		MALLOC(fip, struct fifoinfo *,
		    sizeof(*fip), M_TEMP, M_WAITOK);
		bzero(fip, sizeof(struct fifoinfo));
		vp->v_fifoinfo = fip;
	}
	/* The file systems must pass the address of the location where
	 * they store the vnode pointer. When we add the vnode into the mount
	 * list and name cache they become discoverable. So the file system node
	 * must have the connection to vnode setup by then
	 */

	/* Add fs named reference. */
	if (param->vnfs_flags & VNFS_ADDFSREF) {
		vp->v_lflag |= VNAMED_FSHASH;
	}
	if (param->vnfs_mp) {
		if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) {
			vp->v_flag |= VLOCKLOCAL;
		}
		if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
			panic("insmntque: vp on the free list\n");
		}
		/*
		 * enter in mount vnode list
		 */
		insmntque(vp, param->vnfs_mp);
	}
	if (dvp && vnode_ref(dvp) == 0) {
	if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
		/*
		 * enter into name cache
		 * we've got the info to enter it into the name cache now
		 * cache_enter_create will pick up an extra reference on
		 * the name entered into the string cache
		 */
		vp->v_name = cache_enter_create(dvp, vp, cnp);
		vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);

		if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) {
			vp->v_flag |= VISUNION;
		}
	}
	if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
		/*
		 * this vnode is being created as cacheable in the name cache
		 * this allows us to re-enter it in the cache
		 */
		vp->v_flag |= VNCACHEABLE;
	}
	ut = get_bsdthread_info(current_thread());

	if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
	    (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) {
		/*
		 * process has indicated that it wants any
		 * vnodes created on its behalf to be rapidly
		 * aged to reduce the impact on the cached set
		 *
		 * if UT_KERN_RAGE_VNODES is set, then the
		 * kernel internally wants vnodes to be rapidly
		 * aged, even if the process hasn't requested
		 */
		vp->v_flag |= VRAGE;
	}

#if CONFIG_SECLUDED_MEMORY
	switch (secluded_for_filecache) {
		/*
		 * secluded_for_filecache == 0:
		 * + no file contents in secluded pool
		 */
		/*
		 * secluded_for_filecache == 1:
		 * + files from /Applications/ are OK
		 * + files from /Applications/Camera are not OK
		 * + no files that are open for write
		 */
		if (vnode_vtype(vp) == VREG &&
		    vnode_mount(vp) != NULL &&
		    (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) {
			/* not from root filesystem: eligible for secluded pages */
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
		/*
		 * secluded_for_filecache == 2:
		 * + all read-only files OK, except:
		 * + dyld_shared_cache_arm64*
		 */
		if (vnode_vtype(vp) == VREG) {
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
#endif /* CONFIG_SECLUDED_MEMORY */

	if (existing_vnode) {
/*
 * The following API creates a vnode, associates all the parameters specified in the vnode_fsparam
 * structure, and returns a vnode handle with a reference. Device aliasing is handled here, so checkalias
 * is obsoleted by this.
 */
vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
	return vnode_create_internal(flavor, size, data, vpp, 1);
}
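/*
 * Rough usage sketch of vnode_create() for a filesystem wiring up a newly
 * created node (illustrative only; 'myfs_node' and 'myfs_vnodeop_p' are
 * hypothetical names, and only vnode_fsparam fields that appear in
 * vnode_create_internal() above are shown):
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = mp;			// mount the node lives on
 *	vfsp.vnfs_vtype = VREG;			// regular file
 *	vfsp.vnfs_dvp = dvp;			// parent, used for the name cache entry
 *	vfsp.vnfs_cnp = cnp;			// component name from the lookup
 *	vfsp.vnfs_fsnode = myfs_node;		// FS private data, becomes v_data
 *	vfsp.vnfs_vops = myfs_vnodeop_p;	// vnode operations vector
 *	vfsp.vnfs_filesize = myfs_node_size;	// initial size for the UBC info
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;	// take a named fs reference
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 *	// on success the returned vp carries an iocount; release it with
 *	// vnode_put() once the caller is done with it.
 */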
vnode_create_empty(vnode_t *vpp)
{
	return vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL,
vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
	if (*vpp == NULLVP) {
		panic("NULL vnode passed to vnode_initialize");
	}
#if DEVELOPMENT || DEBUG
	/*
	 * We lock to check that vnode is fit for unlocked use in
	 * vnode_create_internal.
	 */
	vnode_lock_spin(*vpp);
	VNASSERT(((*vpp)->v_iocount == 1), *vpp,
	    ("vnode_initialize : iocount not 1, is %d", (*vpp)->v_iocount));
	VNASSERT(((*vpp)->v_usecount == 0), *vpp,
	    ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
	VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
	    ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
	VNASSERT(((*vpp)->v_data == NULL), *vpp,
	    ("vnode_initialize : v_data not NULL"));

	return vnode_create_internal(flavor, size, data, vpp, 1);
vnode_addfsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if (vp->v_lflag & VNAMED_FSHASH) {
		panic("add_fsref: vp already has named reference");
	}
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
		panic("addfsref: vp on the free list\n");
	}
	vp->v_lflag |= VNAMED_FSHASH;

vnode_removefsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if ((vp->v_lflag & VNAMED_FSHASH) == 0) {
		panic("remove_fsref: no named reference");
	}
	vp->v_lflag &= ~VNAMED_FSHASH;
vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
{
	int count, actualcount, i;
	int indx_start, indx_stop, indx_incr;
	int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
	int noskip_unmount = (flags & VFS_ITERATE_NOSKIP_UNMOUNT);

	count = mount_getvfscnt();

	fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
	allocmem = (void *)fsid_list;

	actualcount = mount_fillfsids(fsid_list, count);

	/*
	 * Establish the iteration direction
	 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
	 */
	if (flags & VFS_ITERATE_TAIL_FIRST) {
		indx_start = actualcount - 1;
	} else { /* Head first by default */
		indx_stop = actualcount;
	}

	for (i = indx_start; i != indx_stop; i += indx_incr) {
		/* obtain the mount point with iteration reference */
		mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);

		if (mp == (struct mount *)0) {
		if ((mp->mnt_lflag & MNT_LDEAD) ||
		    (!noskip_unmount && (mp->mnt_lflag & MNT_LUNMOUNT))) {
		/* iterate over all the vnodes */
		ret = callout(mp, arg);

		/*
		 * Drop the iterref here if the callback didn't do it.
		 * Note: If cb_dropref is set the mp may no longer exist.
		 */
		case VFS_RETURNED_DONE:
			if (ret == VFS_RETURNED_DONE) {
		case VFS_CLAIMED_DONE:
	kfree(allocmem, (count * sizeof(fsid_t)));
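/*
 * Minimal usage sketch of vfs_iterate() (illustrative; the callback name is
 * hypothetical). The callback is invoked once per mount with an iteration
 * reference held, and returns one of the VFS_RETURNED/VFS_CLAIMED values
 * handled above:
 *
 *	static int
 *	count_mounts_callback(mount_t mp, void *arg)
 *	{
 *		int *countp = (int *)arg;
 *
 *		(*countp)++;
 *		return VFS_RETURNED;	// iterref dropped by vfs_iterate
 *	}
 *
 *	int nmounts = 0;
 *	vfs_iterate(0, count_mounts_callback, &nmounts);
 */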
/*
 * Update the vfsstatfs structure in the mountpoint.
 * MAC: Parameter eventtype added, indicating whether the event that
 * triggered this update came from user space, via a system call
 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
 */
vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
{
	/*
	 * Request the attributes we want to propagate into
	 * the per-mount vfsstat structure.
	 */
	VFSATTR_WANTED(&va, f_iosize);
	VFSATTR_WANTED(&va, f_blocks);
	VFSATTR_WANTED(&va, f_bfree);
	VFSATTR_WANTED(&va, f_bavail);
	VFSATTR_WANTED(&va, f_bused);
	VFSATTR_WANTED(&va, f_files);
	VFSATTR_WANTED(&va, f_ffree);
	VFSATTR_WANTED(&va, f_bsize);
	VFSATTR_WANTED(&va, f_fssubtype);

	if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
		KAUTH_DEBUG("STAT - filesystem returned error %d", error);
	}
	if (eventtype == VFS_USER_EVENT) {
		error = mac_mount_check_getattr(ctx, mp, &va);
	}
	/*
	 * Unpack into the per-mount structure.
	 *
	 * We only overwrite these fields, which are likely to change:
	 *
	 * And these which are not, but which the FS has no other way
	 * of providing to us:
	 */
	if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
		/* 4822056 - protect against malformed server mount */
		mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
	} else {
		mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize;   /* default from the device block size */
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
		mp->mnt_vfsstat.f_iosize = va.f_iosize;
	} else {
		mp->mnt_vfsstat.f_iosize = 1024 * 1024;   /* 1MB sensible I/O size */
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
		mp->mnt_vfsstat.f_blocks = va.f_blocks;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
		mp->mnt_vfsstat.f_bfree = va.f_bfree;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) {
		mp->mnt_vfsstat.f_bavail = va.f_bavail;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_bused)) {
		mp->mnt_vfsstat.f_bused = va.f_bused;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_files)) {
		mp->mnt_vfsstat.f_files = va.f_files;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) {
		mp->mnt_vfsstat.f_ffree = va.f_ffree;
	}
	/* this is unlikely to change, but has to be queried for */
	if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) {
		mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
	}
mount_list_add(mount_t mp)
{
	if (system_inshutdown != 0) {
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mount_list_unlock();

mount_list_remove(mount_t mp)
{
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mp->mnt_list.tqe_next = NULL;
	mp->mnt_list.tqe_prev = NULL;
	mount_list_unlock();
mount_lookupby_volfsid(int volfs_id, int withref)
{
	mount_t cur_mount = (mount_t)0;

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
		    (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
		    (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
			if (mount_iterref(cur_mount, 1)) {
				cur_mount = (mount_t)0;
				mount_list_unlock();
	mount_list_unlock();
	if (withref && (cur_mount != (mount_t)0)) {
		if (vfs_busy(mp, LK_NOWAIT) != 0) {
			cur_mount = (mount_t)0;
mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
{
	mount_t retmp = (mount_t)0;

	TAILQ_FOREACH(mp, &mountlist, mnt_list)
	if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
		if (mount_iterref(retmp, 1)) {
	mount_list_unlock();
vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx,
	struct nameidata nd;
	u_int32_t ndflags = 0;

	if (flags & VNODE_LOOKUP_NOFOLLOW) {
	if (flags & VNODE_LOOKUP_NOCROSSMOUNT) {
		ndflags |= NOCROSSMOUNT;
	}
	if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
		ndflags |= CN_NBMOUNTLOOK;
	}
	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(&nd, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	if (start_dvp && (path[0] != '/')) {
		nd.ni_dvp = start_dvp;
		nd.ni_cnd.cn_flags |= USEDVP;
	}
	if ((error = namei(&nd))) {
	nd.ni_cnd.cn_flags &= ~USEDVP;

vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	return vnode_lookupat(path, flags, vpp, ctx, NULLVP);
}
vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	struct nameidata nd;
	u_int32_t ndflags = 0;

	if (ctx == NULL) {              /* XXX technically an error */
		ctx = vfs_context_current();
	}
	if (fmode & O_NOFOLLOW) {
		lflags |= VNODE_LOOKUP_NOFOLLOW;
	}
	if (lflags & VNODE_LOOKUP_NOFOLLOW) {
	if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) {
		ndflags |= NOCROSSMOUNT;
	}
	if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
		ndflags |= CN_NBMOUNTLOOK;
	}
	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	if ((error = vn_open(&nd, fmode, cmode))) {

vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
{
	ctx = vfs_context_current();

	error = vn_close(vp, flags, ctx);
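/*
 * Typical in-kernel pairing of the two KPIs above (illustrative sketch only;
 * the path is hypothetical and error handling is abbreviated):
 *
 *	vnode_t vp = NULLVP;
 *	vfs_context_t ctx = vfs_context_current();
 *
 *	if (vnode_open("/var/db/example", FREAD, 0, 0, &vp, ctx) == 0) {
 *		// ... perform I/O against vp ...
 *		vnode_close(vp, FREAD, ctx);
 *	}
 */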
vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_WANTED(&va, va_modify_time);
	error = vnode_getattr(vp, &va, ctx);
	*mtime = va.va_modify_time;

vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_WANTED(&va, va_flags);
	error = vnode_getattr(vp, &va, ctx);
	*flags = va.va_flags;

/*
 * Returns:	0			Success
 */
vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_WANTED(&va, va_data_size);
	error = vnode_getattr(vp, &va, ctx);
	*sizep = va.va_data_size;

vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_SET(&va, va_data_size, size);
	va.va_vaflags = ioflag & 0xffff;
	return vnode_setattr(vp, &va, ctx);

vnode_setdirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISDIRTY;

vnode_cleardirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VISDIRTY;

vnode_isdirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
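/*
 * The accessors above all follow the same vnode_attr query pattern; a caller
 * asking for a single attribute directly would look like this (illustrative
 * sketch, assuming a valid vp and ctx):
 *
 *	struct vnode_attr va;
 *	struct timespec mtime;
 *
 *	VATTR_INIT(&va);
 *	VATTR_WANTED(&va, va_modify_time);
 *	if (vnode_getattr(vp, &va, ctx) == 0) {
 *		mtime = va.va_modify_time;
 *	}
 */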
vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
{
	/* Only use compound VNOP for compound operation */
	if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
		return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
	return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
/*
 * Create a filesystem object of arbitrary type with arbitrary attributes in
 * the specified directory with the specified name.
 *
 * Parameters:	dvp			Pointer to the vnode of the directory
 *					in which to create the object.
 *		vpp			Pointer to the area into which to
 *					return the vnode of the created object.
 *		cnp			Component name pointer from the namei
 *					data structure, containing the name to
 *					use for the created object.
 *		vap			Pointer to the vnode_attr structure
 *					describing the object to be created,
 *					including the type of object.
 *		flags			VN_* flags controlling ACL inheritance
 *					and whether or not authorization is to
 *					be required for the operation.
 *
 * Returns:	0			Success
 *
 * Implicit:	*vpp			Contains the vnode of the object that
 *					was created, if successful.
 *		*cnp			May be modified by the underlying VFS.
 *		*vap			May be modified by the underlying VFS.
 *					modified by either ACL inheritance or
 *					be modified, even if the operation is
 *
 * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *		Modification of '*cnp' and '*vap' by the underlying VFS is
 *		strongly discouraged.
 *
 * XXX:		This function is a 'vn_*' function; it belongs in vfs_vnops.c
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
{
	errno_t error, old_error;
	vnode_t vp = (vnode_t)0;
	struct componentname *cnp;

	batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;

	KAUTH_DEBUG("%p    CREATE - '%s'", dvp, cnp->cn_nameptr);

	if (flags & VN_CREATE_NOINHERIT) {
		vap->va_vaflags |= VA_NOINHERIT;
	}
	if (flags & VN_CREATE_NOAUTH) {
		vap->va_vaflags |= VA_NOAUTH;
	}
	/*
	 * Handle ACL inheritance, initialize vap.
	 */
	error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);

	if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
		panic("Open parameters, but not a regular file.");
	}
	if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
		panic("Mode for open, but not trying to open...");
	}
	/*
	 * Create the requested node.
	 */
	switch (vap->va_type) {
		error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
		error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
		error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
		panic("vnode_create: unknown vtype %d", vap->va_type);
	KAUTH_DEBUG("%p    CREATE - error %d returned by filesystem", dvp, error);

	/*
	 * If some of the requested attributes weren't handled by the VNOP,
	 * use our fallback code.
	 */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap) && *vpp) {
		KAUTH_DEBUG("     CREATE - doing fallback with ACL %p", vap->va_acl);
		error = vnode_setattr_fallback(*vpp, vap, ctx);
	}
	if ((error == 0) && !(flags & VN_CREATE_NOLABEL)) {
		error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
	}
	if ((error != 0) && (vp != (vnode_t)0)) {
		/* If we've done a compound open, close */
		if (batched && (old_error == 0) && (vap->va_type == VREG)) {
			VNOP_CLOSE(vp, fmode, ctx);
		}
	/* Need to provide notifications if a create succeeded */

	/*
	 * For creation VNOPs, this is the equivalent of
	 * lookup_handle_found_vnode.
	 */
	if (kdebug_enable && *vpp) {
		kdebug_lookup(*vpp, cnp);
	}
	vn_attribute_cleanup(vap, defaulted);
static kauth_scope_t	vnode_scope;
static int	vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
    vnode_t vp, vnode_t dvp, int *errorp);

typedef struct _vnode_authorize_context {
	struct vnode_attr	*vap;
	struct vnode_attr	*dvap;
#define _VAC_IS_OWNER		(1<<0)
#define _VAC_IN_GROUP		(1<<1)
#define _VAC_IS_DIR_OWNER	(1<<2)
#define _VAC_IN_DIR_GROUP	(1<<3)
#define _VAC_NO_VNODE_POINTERS	(1<<4)

void
vnode_authorize_init(void)
{
	vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
}

#define VATTR_PREPARE_DEFAULTED_UID		0x1
#define VATTR_PREPARE_DEFAULTED_GID		0x2
#define VATTR_PREPARE_DEFAULTED_MODE		0x4
vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	kauth_acl_t nacl = NULL, oacl = NULL;

	/*
	 * Handle ACL inheritance.
	 */
	if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
		/* save the original filesec */
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
		if ((error = kauth_acl_inherit(dvp,
		    vap->va_type == VDIR,
			KAUTH_DEBUG("%p    CREATE - error %d processing inheritance", dvp, error);
		/*
		 * If the generated ACL is NULL, then we can save ourselves some effort
		 * by clearing the active bit.
		 */
			VATTR_CLEAR_ACTIVE(vap, va_acl);
			vap->va_base_acl = oacl;
			VATTR_SET(vap, va_acl, nacl);
	error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
		vn_attribute_cleanup(vap, *defaulted_fieldsp);
vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
{
	/*
	 * If the caller supplied a filesec in vap, it has been replaced
	 * now by the post-inheritance copy. We need to put the original back
	 * and free the inherited product.
	 */
	kauth_acl_t nacl, oacl;

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		oacl = vap->va_base_acl;
			VATTR_SET(vap, va_acl, oacl);
			vap->va_base_acl = NULL;
			VATTR_CLEAR_ACTIVE(vap, va_acl);
			kauth_acl_free(nacl);

	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_mode);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}
vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
{
	/*
	 * Normally, unlinking of directories is not supported.
	 * However, some file systems may have limited support.
	 */
	if ((vp->v_type == VDIR) &&
	    !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
		return EPERM;   /* POSIX */
	}

	/* authorize the delete operation */
	error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
	error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
{
	/* Open of existing case */
	kauth_action_t action;

	if (cnp->cn_ndp == NULL) {
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

	/* XXX may do duplicate work here, but ignore that for now (idempotent) */
	if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
		error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
	}
	if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
	if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
		return EOPNOTSUPP;      /* Operation not supported on socket */
	}
	if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
		return ELOOP;   /* O_NOFOLLOW was specified and the target is a symbolic link */
	}
	/* disallow write operations on directories */
	if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
	if ((cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH)) {
		if (vp->v_type != VDIR) {
	/* If a file being opened is a shadow file containing
	 * namedstream data, ignore the macf checks because it
	 * is a kernel internal file and access should always
	 */
	if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
		error = mac_vnode_check_open(ctx, vp, fmode);
	}

	/* compute action to be authorized */
	if (fmode & FREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (fmode & (FWRITE | O_TRUNC)) {
		/*
		 * If we are writing, appending, and not truncating,
		 * indicate that we are appending so that if the
		 * UF_APPEND or SF_APPEND bits are set, we do not deny
		 */
		if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
			action |= KAUTH_VNODE_APPEND_DATA;
		} else {
			action |= KAUTH_VNODE_WRITE_DATA;
		}
	}
	error = vnode_authorize(vp, NULL, action, ctx);
	if (error == EACCES) {
		/*
		 * Shadow files may exist on-disk with a different UID/GID
		 * than that of the current context. Verify that this file
		 * is really a shadow file. If it was created successfully
		 * then it should be authorized.
		 */
		if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
			error = vnode_verifynamedstream(vp);
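/*
 * Worked example of the action computation above: an open for
 * O_RDWR | O_APPEND (FREAD and FWRITE set, O_TRUNC clear) is authorized as
 * KAUTH_VNODE_READ_DATA | KAUTH_VNODE_APPEND_DATA, so a file with UF_APPEND
 * or SF_APPEND set can still be opened for append without being denied for
 * a generic write.
 */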
vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
	if (cnp->cn_ndp == NULL) {
		panic("NULL cn_ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

	/* Only validate path for creation if we didn't do a complete lookup */
	if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
	error = mac_vnode_check_create(ctx, dvp, cnp, vap);
#endif /* CONFIG_MACF */

	return vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx);
vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
    vfs_context_t ctx, void *reserved)
{
	return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved);
}
vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
    vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
{
	return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved);
}
vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path,
    vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
{
	bool swap = flags & VFS_RENAME_SWAP;

	if (reserved != NULL) {
		panic("Passed something other than NULL as reserved field!");
	}

	/*
	 * Avoid renaming "." and "..".
	 *
	 * XXX No need to check for this in the FS.  We should always have the leaves
	 * in VFS in this case.
	 */
	if (fvp->v_type == VDIR &&
	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
	    ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) {
	if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
		error = lookup_validate_creation_path(tcnp->cn_ndp);

	/***** <MACF> *****/
	error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
	error = mac_vnode_check_rename(ctx, tdvp, tvp, tcnp, fdvp, fvp, fcnp);
	/***** </MACF> *****/

	/***** <MiscChecks> *****/
	if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
	} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
	/*
	 * Caller should have already checked this and returned
	 * ENOENT.  If we send back ENOENT here, caller will retry
	 * which isn't what we want so we send back EINVAL here
	 */
	/*
	 * The following edge case is caught here:
	 * (to cannot be a descendent of from)
	 */
	if (tdvp->v_parent == fvp) {
	if (swap && fdvp->v_parent == tvp) {
	/***** </MiscChecks> *****/

	/***** <Kauth> *****/
	/*
	 * As part of the Kauth step, we call out to allow 3rd-party
	 * fileop notification of "about to rename".  This is needed
	 * in the event that 3rd-parties need to know that the DELETE
	 * authorization is actually part of a rename.  It's important
	 * that we guarantee that the DELETE call-out will always be
	 * made if the WILL_RENAME call-out is made.  Another fileop
	 * call-out will be performed once the operation is completed.
	 * We can ignore the result of kauth_authorize_fileop().
	 *
	 * N.B. We are passing the vnode and *both* paths to each
	 * call; kauth_authorize_fileop() extracts the "from" path
	 * when posting a KAUTH_FILEOP_WILL_RENAME notification.
	 * As such, we only post these notifications if all of the
	 * information we need is provided.
	 */
	kauth_action_t f = 0, t = 0;

	/*
	 * Directories changing parents need ...ADD_SUBDIR... to
	 * permit changing ".."
	 */
	if (vnode_isdir(fvp)) {
		f = KAUTH_VNODE_ADD_SUBDIRECTORY;
	}
	if (vnode_isdir(tvp)) {
		t = KAUTH_VNODE_ADD_SUBDIRECTORY;
	}
	if (to_path != NULL) {
		kauth_authorize_fileop(vfs_context_ucred(ctx),
		    KAUTH_FILEOP_WILL_RENAME,
		    (uintptr_t)to_path);
	}
	error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx);

	if (from_path != NULL) {
		kauth_authorize_fileop(vfs_context_ucred(ctx),
		    KAUTH_FILEOP_WILL_RENAME,
		    (uintptr_t)from_path);
	}
	error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx);

	f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
	t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;

	error = vnode_authorize(fdvp, NULL, f | t, ctx);
	error = vnode_authorize(fdvp, NULL, t, ctx);
	error = vnode_authorize(tdvp, NULL, f, ctx);

	if ((tvp != NULL) && vnode_isdir(tvp)) {
	} else if (tdvp != fdvp) {
	/*
	 * must have delete rights to remove the old name even in
	 * the simple case of fdvp == tdvp.
	 *
	 * If fvp is a directory, and we are changing its parent,
	 * then we also need rights to rewrite its ".." entry as well.
	 */
	if (to_path != NULL) {
		kauth_authorize_fileop(vfs_context_ucred(ctx),
		    KAUTH_FILEOP_WILL_RENAME,
		    (uintptr_t)to_path);
	}
	if (vnode_isdir(fvp)) {
		if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
	if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) {
	/* moving into tdvp or tvp, must have rights to add */
	if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
	    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
	/* node staying in same directory, must be allowed to add new name */
	if ((error = vnode_authorize(fdvp, NULL,
	    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) {
	/* overwriting tvp */
	if ((tvp != NULL) && !vnode_isdir(tvp) &&
	    ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
	/***** </Kauth> *****/

	/* XXX more checks? */
vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
	if (reserved != NULL) {
		panic("reserved not NULL in vn_authorize_mkdir()");
	}

	/* XXX A hack for now, to make shadow files work */
	if (cnp->cn_ndp == NULL) {
	if (vnode_compound_mkdir_available(dvp)) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
	error = mac_vnode_check_create(ctx,

	/* authorize addition of a directory to the parent */
	if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
{
	if (reserved != NULL) {
		panic("Non-NULL reserved argument to vn_authorize_rmdir()");
	}
	if (vp->v_type != VDIR) {
		/*
		 * rmdir only deals with directories
		 */
	/*
	 * No rmdir "." please.
	 */
	error = mac_vnode_check_unlink(ctx, dvp,

	return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
/*
 * Authorizer for directory cloning. This does not use vnodes but instead
 * uses prefilled vnode attributes from the filesystem.
 *
 * The same function is called to set up the attributes required, perform the
 * authorization and cleanup (if required)
 */
vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action,
    struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp,
    dir_clone_authorizer_op_t vattr_op, uint32_t flags, vfs_context_t ctx,
    __unused void *reserved)
{
	int is_suser = vfs_context_issuser(ctx);

	if (vattr_op == OP_VATTR_SETUP) {
		/*
		 * When ACL inheritance is implemented, both vap->va_acl and
		 * dvap->va_acl will be required (even as superuser).
		 */
		VATTR_WANTED(vap, va_type);
		VATTR_WANTED(vap, va_mode);
		VATTR_WANTED(vap, va_flags);
		VATTR_WANTED(vap, va_uid);
		VATTR_WANTED(vap, va_gid);
			VATTR_WANTED(dvap, va_flags);

			/*
			 * If not superuser, we have to evaluate ACLs and
			 * need the target directory gid to set the initial
			 * gid of the new object.
			 */
			VATTR_WANTED(vap, va_acl);
				VATTR_WANTED(dvap, va_gid);
		} else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
			VATTR_WANTED(dvap, va_gid);
	} else if (vattr_op == OP_VATTR_CLEANUP) {
		return 0; /* Nothing to do for now */
	}

	/* dvap isn't used for authorization */
	error = vnode_attr_authorize(vap, NULL, mp, action, ctx);

	/*
	 * vn_attribute_prepare should be able to accept attributes as well as
	 * vnodes but for now we do this inline.
	 */
	if (!is_suser || (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
		/*
		 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit
		 * owner is set, that owner takes ownership of all new files.
		 */
		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsowner != KAUTH_UID_NONE)) {
			VATTR_SET(vap, va_uid, mp->mnt_fsowner);
		} else {
			/* default owner is current user */
			VATTR_SET(vap, va_uid,
			    kauth_cred_getuid(vfs_context_ucred(ctx)));
		}
		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsgroup != KAUTH_GID_NONE)) {
			VATTR_SET(vap, va_gid, mp->mnt_fsgroup);
		} else {
			/*
			 * default group comes from parent object,
			 * fallback to current user
			 */
			if (VATTR_IS_SUPPORTED(dvap, va_gid)) {
				VATTR_SET(vap, va_gid, dvap->va_gid);
			} else {
				VATTR_SET(vap, va_gid,
				    kauth_cred_getgid(vfs_context_ucred(ctx)));
			}
		}
	}

	/* Inherit SF_RESTRICTED bit from destination directory only */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		VATTR_SET(vap, va_flags,
		    ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */
		if (VATTR_IS_ACTIVE(dvap, va_flags)) {
			VATTR_SET(vap, va_flags,
			    vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
		}
	} else if (VATTR_IS_ACTIVE(dvap, va_flags)) {
		VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
	}
/*
 * Authorize an operation on a vnode.
 *
 * This is KPI, but here because it needs vnode_scope.
 *
 * Returns:	0			Success
 *	kauth_authorize_action:EPERM	...
 *		xlate => EACCES		Permission denied
 *	kauth_authorize_action:0	Success
 *	kauth_authorize_action:		Depends on callback return; this is
 *					usually only vnode_authorize_callback(),
 *					but may include other listeners, if any
 */
vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
{
	/*
	 * We can't authorize against a dead vnode; allow all operations through so that
	 * the correct error can be returned.
	 */
	if (vp->v_type == VBAD) {
	result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
	    (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
	if (result == EPERM) {          /* traditional behaviour */
	/* did the lower layers give a better error return? */
	if ((result != 0) && (error != 0)) {
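/*
 * Illustrative caller of vnode_authorize() (sketch only): checking whether
 * the current context may delete 'vp' from its parent 'dvp' before issuing
 * the corresponding VNOP, mirroring the vn_authorize_* helpers above:
 *
 *	int error;
 *
 *	error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
 *	if (error != 0) {
 *		return error;	// EACCES, or a better error from the lower layers
 *	}
 */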
/*
 * Test for vnode immutability.
 *
 * The 'append' flag is set when the authorization request is constrained
 * to operations which only request the right to append to a file.
 *
 * The 'ignore' flag is set when an operation modifying the immutability flags
 * is being authorized.  We check the system securelevel to determine which
 * immutability flags we can ignore.
 */
vnode_immutable(struct vnode_attr *vap, int append, int ignore)
{
	/* start with all bits precluding the operation */
	mask = IMMUTABLE | APPEND;

	/* if appending only, remove the append-only bits */

	/* ignore only set when authorizing flags changes */
		if (securelevel <= 0) {
			/* in insecure state, flags do not inhibit changes */
		/* in secure state, user flags don't inhibit */
		mask &= ~(UF_IMMUTABLE | UF_APPEND);

	KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
	if ((vap->va_flags & mask) != 0) {
vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
{
	/* default assumption is not-owner */

	/*
	 * If the filesystem has given us a UID, we treat this as authoritative.
	 */
	if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
		result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
	}
	/* we could test the owner UUID here if we had a policy for it */
/*
 * Description:	Ask if a cred is a member of the group owning the vnode object
 *
 * Parameters:		vap		vnode attribute
 *			vap->va_gid	group owner of vnode object
 *			cred		credential to check
 *			ismember	pointer to where to put the answer
 *			idontknow	Return this if we can't get an answer
 *
 * Returns:	0			Success
 *		idontknow		Can't get information
 *	kauth_cred_ismember_gid:?	Error from kauth subsystem
 *	kauth_cred_ismember_gid:?	Error from kauth subsystem
 */
vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
{
	/*
	 * The caller is expected to have asked the filesystem for a group
	 * at some point prior to calling this function.  The answer may
	 * have been that there is no group ownership supported for the
	 * vnode object, in which case we return
	 */
	if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
		error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
		/*
		 * Credentials which are opted into external group membership
		 * resolution which are not known to the external resolver
		 * will result in an ENOENT error.  We translate this into
		 * the appropriate 'idontknow' response for our caller.
		 *
		 * XXX We do not make a distinction here between an ENOENT
		 * XXX arising from a response from the external resolver,
		 * XXX and an ENOENT which is internally generated.  This is
		 * XXX a deficiency of the published kauth_cred_ismember_gid()
		 * XXX KPI which can not be overcome without new KPI.  For
		 * XXX all currently known cases, however, this will result
		 * XXX in correct behaviour.
		 */
		if (error == ENOENT) {
	/*
	 * XXX We could test the group UUID here if we had a policy for it,
	 * XXX but this is problematic from the perspective of synchronizing
	 * XXX group UUID and POSIX GID ownership of a file and keeping the
	 * XXX values coherent over time.  The problem is that the local
	 * XXX system will vend transient group UUIDs for unknown POSIX GID
	 * XXX values, and these are not persistent, whereas storage of values
	 * XXX is persistent.  One potential solution to this is a local
	 * XXX (persistent) replica of remote directory entries and vended
	 * XXX local ids in a local directory server (think in terms of a
	 * XXX caching DNS server).
	 */
vauth_file_owner(vauth_ctx vcp)
{
	if (vcp->flags_valid & _VAC_IS_OWNER) {
		result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
	} else {
		result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);

		/* cache our result */
		vcp->flags_valid |= _VAC_IS_OWNER;
			vcp->flags |= _VAC_IS_OWNER;
			vcp->flags &= ~_VAC_IS_OWNER;
/*
 * vauth_file_ingroup
 *
 * Description:	Ask if a user is a member of the group owning the file
 *
 * Parameters:		vcp		The vnode authorization context that
 *					contains the user and file info
 *			vcp->flags_valid	Valid flags
 *			vcp->flags		Flags values
 *			vcp->vap		File vnode attributes
 *			vcp->ctx		VFS Context (for user)
 *			ismember	pointer to where to put the answer
 *			idontknow	Return this if we can't get an answer
 *
 * Returns:	0			Success
 *	vauth_node_group:?		Error from vauth_node_group()
 *
 * Implicit returns:	*ismember	0	The user is not a group member
 *					1	The user is a group member
 */
vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
{
	/* Check for a cached answer first, to avoid the check if possible */
	if (vcp->flags_valid & _VAC_IN_GROUP) {
		*ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
	} else {
		/* Otherwise, go look for it */
		error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);

		/* cache our result */
		vcp->flags_valid |= _VAC_IN_GROUP;
			vcp->flags |= _VAC_IN_GROUP;
			vcp->flags &= ~_VAC_IN_GROUP;
vauth_dir_owner(vauth_ctx vcp)
{
	if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
		result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
	} else {
		result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);

		/* cache our result */
		vcp->flags_valid |= _VAC_IS_DIR_OWNER;
			vcp->flags |= _VAC_IS_DIR_OWNER;
			vcp->flags &= ~_VAC_IS_DIR_OWNER;
/*
 * Description:	Ask if a user is a member of the group owning the directory
 *
 * Parameters:		vcp		The vnode authorization context that
 *					contains the user and directory info
 *			vcp->flags_valid	Valid flags
 *			vcp->flags		Flags values
 *			vcp->dvap		Dir vnode attributes
 *			vcp->ctx		VFS Context (for user)
 *			ismember	pointer to where to put the answer
 *			idontknow	Return this if we can't get an answer
 *
 * Returns:	0			Success
 *	vauth_node_group:?		Error from vauth_node_group()
 *
 * Implicit returns:	*ismember	0	The user is not a group member
 *					1	The user is a group member
 */
vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
{
	/* Check for a cached answer first, to avoid the check if possible */
	if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
		*ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
	} else {
		/* Otherwise, go look for it */
		error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);

		/* cache our result */
		vcp->flags_valid |= _VAC_IN_DIR_GROUP;
			vcp->flags |= _VAC_IN_DIR_GROUP;
			vcp->flags &= ~_VAC_IN_DIR_GROUP;
/*
 * Test the posix permissions in (vap) to determine whether (credential)
 * may perform (action)
 */
vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
{
	struct vnode_attr *vap;
	int needed, error, owner_ok, group_ok, world_ok, ismember;
#ifdef KAUTH_DEBUG_ENABLE
	const char *where = "uninitialized";
# define _SETWHERE(c)   where = c;
#else
# define _SETWHERE(c)
#endif

	/* checking file or directory? */

	/*
	 * We want to do as little work here as possible.  So first we check
	 * which sets of permissions grant us the access we need, and avoid checking
	 * whether specific permissions grant access when more generic ones would.
	 */

	/* owner permissions */
	if (action & VREAD) {
	if (action & VWRITE) {
	if (action & VEXEC) {
	owner_ok = (needed & vap->va_mode) == needed;

	/* group permissions */
	if (action & VREAD) {
	if (action & VWRITE) {
	if (action & VEXEC) {
	group_ok = (needed & vap->va_mode) == needed;

	/* world permissions */
	if (action & VREAD) {
	if (action & VWRITE) {
	if (action & VEXEC) {
	world_ok = (needed & vap->va_mode) == needed;

	/* If granted/denied by all three, we're done */
	if (owner_ok && group_ok && world_ok) {
	if (!owner_ok && !group_ok && !world_ok) {

	/* Check ownership (relatively cheap) */
	if ((on_dir && vauth_dir_owner(vcp)) ||
	    (!on_dir && vauth_file_owner(vcp))) {

	/* Not owner; if group and world both grant it we're done */
	if (group_ok && world_ok) {
		_SETWHERE("group/world");
	if (!group_ok && !world_ok) {
		_SETWHERE("group/world");

	/* Check group membership (most expensive) */
	ismember = 0;   /* Default to allow, if the target has no group owner */

	/*
	 * In the case we can't get an answer about the user from the call to
	 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
	 * the side of caution, rather than simply granting access, or we will
	 * fail to correctly implement exclusion groups, so we set the third
	 * parameter on the basis of the state of 'group_ok'.
	 */
	error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));

	/* Not owner, not in group, use world result */

	KAUTH_DEBUG("%p    %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
	    vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
	    (action & VREAD)  ? "r" : "-",
	    (action & VWRITE) ? "w" : "-",
	    (action & VEXEC)  ? "x" : "-",
	    (vap->va_mode & S_IRUSR) ? "r" : "-",
	    (vap->va_mode & S_IWUSR) ? "w" : "-",
	    (vap->va_mode & S_IXUSR) ? "x" : "-",
	    (vap->va_mode & S_IRGRP) ? "r" : "-",
	    (vap->va_mode & S_IWGRP) ? "w" : "-",
	    (vap->va_mode & S_IXGRP) ? "x" : "-",
	    (vap->va_mode & S_IROTH) ? "r" : "-",
	    (vap->va_mode & S_IWOTH) ? "w" : "-",
	    (vap->va_mode & S_IXOTH) ? "x" : "-",
	    kauth_cred_getuid(vcp->ctx->vc_ucred),
	    on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
	    on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
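/*
 * Worked example of the short-circuit logic above (no behaviour beyond what
 * the code already does): for a 0644 file and a VWRITE request, owner_ok is
 * 1 while group_ok and world_ok are 0, so the answer cannot be decided until
 * the cheap ownership check runs; only if that fails do we pay for the
 * group-membership lookup, and a non-owner, non-member caller falls through
 * to the world result (deny).
 */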
/*
 * Authorize the deletion of the node vp from the directory dvp.
 *
 * We assume that:
 * - Neither the node nor the directory are immutable.
 * - The user is not the superuser.
 *
 * The precedence of factors for authorizing or denying delete for a credential
 *
 * 1) Explicit ACE on the node. (allow or deny DELETE)
 * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD).
 *
 *    If there are conflicting ACEs on the node and the directory, the node
 *
 * 3) Sticky bit on the directory.
 *    Deletion is not permitted if the directory is sticky and the caller is
 *    not owner of the node or directory. The sticky bit rules are like a deny
 *    delete ACE except lower in priority than ACLs either allowing or denying
 *
 * 4) POSIX permissions on the directory.
 *
 * As an optimization, we cache whether or not delete child is permitted
 * on directories. This enables us to skip directory ACL and POSIX checks
 * as we already have the result from those checks. However, we always check the
 * node ACL and, if the directory has the sticky bit set, we always check its
 * ACL (even for a directory with an authorized delete child). Furthermore,
 * caching the delete child authorization is independent of the sticky bit
 * being set as it is only applicable in determining whether the node can be
 */
vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
{
	struct vnode_attr	*vap = vcp->vap;
	struct vnode_attr	*dvap = vcp->dvap;
	kauth_cred_t		cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval	eval;
	int			error, ismember;

	/* Check the ACL on the node first */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
		}
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			KAUTH_DEBUG("%p    DENIED - denied by ACL", vcp->vp);
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - granted by ACL", vcp->vp);
		case KAUTH_RESULT_DEFER:
			/* Defer to directory */
			KAUTH_DEBUG("%p    DEFERRED - by file ACL", vcp->vp);
		}
	}

	/*
	 * Without a sticky bit, a previously authorized delete child is
	 * sufficient to authorize this delete.
	 *
	 * If the sticky bit is set, a directory ACL which allows delete child
	 * overrides a (potential) sticky bit deny. The authorized delete child
	 * cannot tell us if it was authorized because of an explicit delete
	 * child allow ACE or because of POSIX permissions so we have to check
	 * the directory ACL every time if the directory has a sticky bit.
	 */
	if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) {
		KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp);
	}

	/* check the ACL on the directory */
	if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
		eval.ae_acl = &dvap->va_acl->acl_ace[0];
		eval.ae_count = dvap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_dir_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		/*
		 * If there is no entry, we are going to defer to other
		 * authorization mechanisms.
		 */
		error = kauth_acl_evaluate(cred, &eval);
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			KAUTH_DEBUG("%p    DENIED - denied by directory ACL", vcp->vp);
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL", vcp->vp);
			if (!cached_delete_child && vcp->dvp) {
				vnode_cache_authorized_action(vcp->dvp,
				    vcp->ctx, KAUTH_VNODE_DELETE_CHILD);
			}
		case KAUTH_RESULT_DEFER:
			/* Deferred by directory ACL */
			KAUTH_DEBUG("%p    DEFERRED - directory ACL", vcp->vp);
		}
	}

	/*
	 * From this point, we can't explicitly allow and if we reach the end
	 * of the function without a denial, then the delete is authorized.
	 */
	if (!cached_delete_child) {
		if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) {
			KAUTH_DEBUG("%p    DENIED - denied by posix permissions", vcp->vp);
		}
		/*
		 * Cache the authorized action on the vnode if allowed by the
		 * directory ACL or POSIX permissions. It is correct to cache
		 * this action even if sticky bit would deny deleting the node.
		 */
		vnode_cache_authorized_action(vcp->dvp, vcp->ctx,
		    KAUTH_VNODE_DELETE_CHILD);
	}

	/* enforce sticky bit behaviour */
	if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
		KAUTH_DEBUG("%p    DENIED - sticky bit rules (user %d  file %d  dir %d)",
		    vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
	}

	/* not denied, must be OK */
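/*
 * Worked example of the precedence above (illustrative): in a sticky (+t)
 * directory such as a tmp-style directory, a caller who owns neither the
 * node nor the directory is denied even when the directory ACL or POSIX
 * permissions would otherwise grant DELETE_CHILD, unless an ACE on the node
 * or the directory explicitly allows the delete and returns earlier.
 */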
/*
 * Authorize an operation based on the node's attributes.
 */
vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
{
	struct vnode_attr	*vap = vcp->vap;
	kauth_cred_t		cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval	eval;
	int			error, ismember;
	mode_t			posix_action;

	/*
	 * If we are the file owner, we automatically have some rights.
	 *
	 * Do we need to expand this to support group ownership?
	 */
	if (vauth_file_owner(vcp)) {
		acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
	}

	/*
	 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
	 * mask the latter.  If TAKE_OWNERSHIP is requested the caller is about to
	 * change ownership to themselves, and WRITE_SECURITY is implicitly
	 * granted to the owner.  We need to do this because at this point
	 * WRITE_SECURITY may not be granted as the caller is not currently
	 */
	if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
	    (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) {
		acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
	}

	if (acl_rights == 0) {
		KAUTH_DEBUG("%p    ALLOWED - implicit or no rights required", vcp->vp);
	}

	/* if we have an ACL, evaluate it */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = acl_rights;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
		}
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			KAUTH_DEBUG("%p    DENIED - by ACL", vcp->vp);
			return EACCES;          /* deny, deny, counter-allege */
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - all rights granted by ACL", vcp->vp);
		case KAUTH_RESULT_DEFER:
			/* Effectively the same as !delete_child_denied */
			KAUTH_DEBUG("%p    DEFERRED - directory ACL", vcp->vp);
		}

		*found_deny = eval.ae_found_deny;

		/* fall through and evaluate residual rights */
	} else {
		/* no ACL, everything is residual */
		eval.ae_residual = acl_rights;
	}

	/*
	 * Grant residual rights that have been pre-authorized.
	 */
	eval.ae_residual &= ~preauth_rights;

	/*
	 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
	 */
	if (vauth_file_owner(vcp)) {
		eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
	}

	if (eval.ae_residual == 0) {
		KAUTH_DEBUG("%p    ALLOWED - rights already authorized", vcp->vp);
	}

	/*
	 * Bail if we have residual rights that can't be granted by posix permissions,
	 * or aren't presumed granted at this point.
	 *
	 * XXX these can be collapsed for performance
	 */
	if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
		KAUTH_DEBUG("%p    DENIED - CHANGE_OWNER not permitted", vcp->vp);
	}
	if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
		KAUTH_DEBUG("%p    DENIED - WRITE_SECURITY not permitted", vcp->vp);
	}

	if (eval.ae_residual & KAUTH_VNODE_DELETE) {
		panic("vnode_authorize: can't be checking delete permission here");
	}

	/*
	 * Compute the fallback posix permissions that will satisfy the remaining
	 */
	if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
	    KAUTH_VNODE_LIST_DIRECTORY |
	    KAUTH_VNODE_READ_EXTATTRIBUTES)) {
		posix_action |= VREAD;
	}
	if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
	    KAUTH_VNODE_ADD_FILE |
	    KAUTH_VNODE_ADD_SUBDIRECTORY |
	    KAUTH_VNODE_DELETE_CHILD |
	    KAUTH_VNODE_WRITE_ATTRIBUTES
|
7796 KAUTH_VNODE_WRITE_EXTATTRIBUTES
)) {
7797 posix_action
|= VWRITE
;
7799 if (eval
.ae_residual
& (KAUTH_VNODE_EXECUTE
|
7800 KAUTH_VNODE_SEARCH
)) {
7801 posix_action
|= VEXEC
;
7804 if (posix_action
!= 0) {
7805 return vnode_authorize_posix(vcp
, posix_action
, 0 /* !on_dir */);
7807 KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
7809 (eval
.ae_residual
& KAUTH_VNODE_READ_DATA
)
7810 ? vnode_isdir(vcp
->vp
) ? " LIST_DIRECTORY" : " READ_DATA" : "",
7811 (eval
.ae_residual
& KAUTH_VNODE_WRITE_DATA
)
7812 ? vnode_isdir(vcp
->vp
) ? " ADD_FILE" : " WRITE_DATA" : "",
7813 (eval
.ae_residual
& KAUTH_VNODE_EXECUTE
)
7814 ? vnode_isdir(vcp
->vp
) ? " SEARCH" : " EXECUTE" : "",
7815 (eval
.ae_residual
& KAUTH_VNODE_DELETE
)
7817 (eval
.ae_residual
& KAUTH_VNODE_APPEND_DATA
)
7818 ? vnode_isdir(vcp
->vp
) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
7819 (eval
.ae_residual
& KAUTH_VNODE_DELETE_CHILD
)
7820 ? " DELETE_CHILD" : "",
7821 (eval
.ae_residual
& KAUTH_VNODE_READ_ATTRIBUTES
)
7822 ? " READ_ATTRIBUTES" : "",
7823 (eval
.ae_residual
& KAUTH_VNODE_WRITE_ATTRIBUTES
)
7824 ? " WRITE_ATTRIBUTES" : "",
7825 (eval
.ae_residual
& KAUTH_VNODE_READ_EXTATTRIBUTES
)
7826 ? " READ_EXTATTRIBUTES" : "",
7827 (eval
.ae_residual
& KAUTH_VNODE_WRITE_EXTATTRIBUTES
)
7828 ? " WRITE_EXTATTRIBUTES" : "",
7829 (eval
.ae_residual
& KAUTH_VNODE_READ_SECURITY
)
7830 ? " READ_SECURITY" : "",
7831 (eval
.ae_residual
& KAUTH_VNODE_WRITE_SECURITY
)
7832 ? " WRITE_SECURITY" : "",
7833 (eval
.ae_residual
& KAUTH_VNODE_CHECKIMMUTABLE
)
7834 ? " CHECKIMMUTABLE" : "",
7835 (eval
.ae_residual
& KAUTH_VNODE_CHANGE_OWNER
)
7836 ? " CHANGE_OWNER" : "");
7840 * Lack of required Posix permissions implies no reason to deny access.
7846 * Check for file immutability.
7849 vnode_authorize_checkimmutable(mount_t mp
, struct vnode_attr
*vap
, int rights
, int ignore
)
7855 * Perform immutability checks for operations that change data.
7857 * Sockets, fifos and devices require special handling.
7859 switch (vap
->va_type
) {
7865 * Writing to these nodes does not change the filesystem data,
7866 * so forget that it's being tried.
7868 rights
&= ~KAUTH_VNODE_WRITE_DATA
;
7875 if (rights
& KAUTH_VNODE_WRITE_RIGHTS
) {
7876 /* check per-filesystem options if possible */
7878 /* check for no-EA filesystems */
7879 if ((rights
& KAUTH_VNODE_WRITE_EXTATTRIBUTES
) &&
7880 (vfs_flags(mp
) & MNT_NOUSERXATTR
)) {
7881 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vap
);
7882 error
= EACCES
; /* User attributes disabled */
7888 * check for file immutability. first, check if the requested rights are
7889 * allowable for a UF_APPEND file.
7892 if (vap
->va_type
== VDIR
) {
7893 if ((rights
& (KAUTH_VNODE_ADD_FILE
| KAUTH_VNODE_ADD_SUBDIRECTORY
| KAUTH_VNODE_WRITE_EXTATTRIBUTES
)) == rights
) {
7897 if ((rights
& (KAUTH_VNODE_APPEND_DATA
| KAUTH_VNODE_WRITE_EXTATTRIBUTES
)) == rights
) {
7901 if ((error
= vnode_immutable(vap
, append
, ignore
)) != 0) {
7902 KAUTH_DEBUG("%p DENIED - file is immutable", vap
);
/*
 * Handle authorization actions for filesystems that advertise that the
 * server will be enforcing.
 *
 * Returns:	0	Authorization should be handled locally
 *		1	Authorization was handled by the FS
 *
 * Note:	Imputed returns will only occur if the authorization request
 *		was handled by the FS.
 *
 * Imputed:	*resultp, modified	Return code from FS when the request is
 *					handled by the FS.
 */
static int
vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
{
	int error;

	/*
	 * If the vp is a device node, socket or FIFO it actually represents a local
	 * endpoint, so we need to handle it locally.
	 */
	switch (vp->v_type) {
	case VBLK:
	case VCHR:
	case VSOCK:
	case VFIFO:
		return 0;
	default:
		break;
	}

	/*
	 * In the advisory request case, if the filesystem doesn't think it's reliable
	 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
	 */
	if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) {
		return 0;
	}

	/*
	 * Let the filesystem have a say in the matter.  It's OK for it to not implement
	 * VNOP_ACCESS, as most will authorise inline with the actual request.
	 */
	if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
		*resultp = error;
		KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
		return 1;
	}

	/*
	 * Typically opaque filesystems do authorisation in-line, but exec is a special case.  In
	 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
	 */
	if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
		/* try a VNOP_OPEN for readonly access */
		if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
			*resultp = error;
			KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
			return 1;
		}
		VNOP_CLOSE(vp, FREAD, ctx);
	}

	/*
	 * We don't have any reason to believe that the request has to be denied at this point,
	 * so go ahead and allow it.
	 */
	*resultp = 0;
	KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
	return 1;
}
7989 * Returns: KAUTH_RESULT_ALLOW
7992 * Imputed: *arg3, modified Error code in the deny case
7993 * EROFS Read-only file system
7994 * EACCES Permission denied
7995 * EPERM Operation not permitted [no execute]
7996 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
7998 * vnode_authorize_opaque:*arg2 ???
7999 * vnode_authorize_checkimmutable:???
8000 * vnode_authorize_delete:???
8001 * vnode_authorize_simple:???
8006 vnode_authorize_callback(__unused kauth_cred_t cred
, __unused
void *idata
,
8007 kauth_action_t action
, uintptr_t arg0
, uintptr_t arg1
, uintptr_t arg2
,
8011 vnode_t cvp
= NULLVP
;
8013 int result
= KAUTH_RESULT_DENY
;
8014 int parent_iocount
= 0;
8015 int parent_action
; /* In case we need to use namedstream's data fork for cached rights*/
8017 ctx
= (vfs_context_t
)arg0
;
8019 dvp
= (vnode_t
)arg2
;
8022 * if there are 2 vnodes passed in, we don't know at
8023 * this point which rights to look at based on the
8024 * combined action being passed in... defer until later...
8025 * otherwise check the kauth 'rights' cache hung
8026 * off of the vnode we're interested in... if we've already
8027 * been granted the right we're currently interested in,
8028 * we can just return success... otherwise we'll go through
8029 * the process of authorizing the requested right(s)... if that
8030 * succeeds, we'll add the right(s) to the cache.
8031 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
8040 * For named streams on local-authorization volumes, rights are cached on the parent;
8041 * authorization is determined by looking at the parent's properties anyway, so storing
8042 * on the parent means that we don't recompute for the named stream and that if
8043 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
8044 * stream to flush its cache separately. If we miss in the cache, then we authorize
8045 * as if there were no cached rights (passing the named stream vnode and desired rights to
8046 * vnode_authorize_callback_int()).
8048 * On an opaquely authorized volume, we don't know the relationship between the
8049 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
8050 * on such a volume are authorized directly (rather than using the parent) and have their
8051 * own caches. When a named stream vnode is created, we mark the parent as having a named
8052 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
8053 * find the stream and flush its cache.
8055 if (vnode_isnamedstream(vp
) && (!vfs_authopaque(vp
->v_mount
))) {
8056 cvp
= vnode_getparent(vp
);
8057 if (cvp
!= NULLVP
) {
8061 goto defer
; /* If we can't use the parent, take the slow path */
8064 /* Have to translate some actions */
8065 parent_action
= action
;
8066 if (parent_action
& KAUTH_VNODE_READ_DATA
) {
8067 parent_action
&= ~KAUTH_VNODE_READ_DATA
;
8068 parent_action
|= KAUTH_VNODE_READ_EXTATTRIBUTES
;
8070 if (parent_action
& KAUTH_VNODE_WRITE_DATA
) {
8071 parent_action
&= ~KAUTH_VNODE_WRITE_DATA
;
8072 parent_action
|= KAUTH_VNODE_WRITE_EXTATTRIBUTES
;
8079 if (vnode_cache_is_authorized(cvp
, ctx
, parent_iocount
? parent_action
: action
) == TRUE
) {
8080 result
= KAUTH_RESULT_ALLOW
;
8084 result
= vnode_authorize_callback_int(action
, ctx
, vp
, dvp
, (int *)arg3
);
8086 if (result
== KAUTH_RESULT_ALLOW
&& cvp
!= NULLVP
) {
8087 KAUTH_DEBUG("%p - caching action = %x", cvp
, action
);
8088 vnode_cache_authorized_action(cvp
, ctx
, action
);
8092 if (parent_iocount
) {
8100 vnode_attr_authorize_internal(vauth_ctx vcp
, mount_t mp
,
8101 kauth_ace_rights_t rights
, int is_suser
, boolean_t
*found_deny
,
8102 int noimmutable
, int parent_authorized_for_delete_child
)
8107 * Check for immutability.
8109 * In the deletion case, parent directory immutability vetoes specific
8112 if ((result
= vnode_authorize_checkimmutable(mp
, vcp
->vap
, rights
,
8113 noimmutable
)) != 0) {
8117 if ((rights
& KAUTH_VNODE_DELETE
) &&
8118 !parent_authorized_for_delete_child
) {
8119 result
= vnode_authorize_checkimmutable(mp
, vcp
->dvap
,
8120 KAUTH_VNODE_DELETE_CHILD
, 0);
8127 * Clear rights that have been authorized by reaching this point, bail if nothing left to
8130 rights
&= ~(KAUTH_VNODE_LINKTARGET
| KAUTH_VNODE_CHECKIMMUTABLE
);
8136 * If we're not the superuser, authorize based on file properties;
8137 * note that even if parent_authorized_for_delete_child is TRUE, we
8138 * need to check on the node itself.
8141 /* process delete rights */
8142 if ((rights
& KAUTH_VNODE_DELETE
) &&
8143 ((result
= vnode_authorize_delete(vcp
, parent_authorized_for_delete_child
)) != 0)) {
8147 /* process remaining rights */
8148 if ((rights
& ~KAUTH_VNODE_DELETE
) &&
8149 (result
= vnode_authorize_simple(vcp
, rights
, rights
& KAUTH_VNODE_DELETE
, found_deny
)) != 0) {
8154 * Execute is only granted to root if one of the x bits is set. This check only
8155 * makes sense if the posix mode bits are actually supported.
8157 if ((rights
& KAUTH_VNODE_EXECUTE
) &&
8158 (vcp
->vap
->va_type
== VREG
) &&
8159 VATTR_IS_SUPPORTED(vcp
->vap
, va_mode
) &&
8160 !(vcp
->vap
->va_mode
& (S_IXUSR
| S_IXGRP
| S_IXOTH
))) {
8162 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vcp
, vcp
->vap
->va_mode
);
8166 /* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
8169 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vcp
);
8176 vnode_authorize_callback_int(kauth_action_t action
, vfs_context_t ctx
,
8177 vnode_t vp
, vnode_t dvp
, int *errorp
)
8179 struct _vnode_authorize_context auth_context
;
8182 kauth_ace_rights_t rights
;
8183 struct vnode_attr va
, dva
;
8186 boolean_t parent_authorized_for_delete_child
= FALSE
;
8187 boolean_t found_deny
= FALSE
;
8188 boolean_t parent_ref
= FALSE
;
8189 boolean_t is_suser
= FALSE
;
8191 vcp
= &auth_context
;
8196 * Note that we authorize against the context, not the passed cred
8197 * (the same thing anyway)
8199 cred
= ctx
->vc_ucred
;
8206 vcp
->flags
= vcp
->flags_valid
= 0;
8209 if ((ctx
== NULL
) || (vp
== NULL
) || (cred
== NULL
)) {
8210 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx
, vp
, cred
);
8214 KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
8215 vp
, vfs_context_proc(ctx
)->p_comm
,
8216 (action
& KAUTH_VNODE_ACCESS
) ? "access" : "auth",
8217 (action
& KAUTH_VNODE_READ_DATA
) ? vnode_isdir(vp
) ? " LIST_DIRECTORY" : " READ_DATA" : "",
8218 (action
& KAUTH_VNODE_WRITE_DATA
) ? vnode_isdir(vp
) ? " ADD_FILE" : " WRITE_DATA" : "",
8219 (action
& KAUTH_VNODE_EXECUTE
) ? vnode_isdir(vp
) ? " SEARCH" : " EXECUTE" : "",
8220 (action
& KAUTH_VNODE_DELETE
) ? " DELETE" : "",
8221 (action
& KAUTH_VNODE_APPEND_DATA
) ? vnode_isdir(vp
) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
8222 (action
& KAUTH_VNODE_DELETE_CHILD
) ? " DELETE_CHILD" : "",
8223 (action
& KAUTH_VNODE_READ_ATTRIBUTES
) ? " READ_ATTRIBUTES" : "",
8224 (action
& KAUTH_VNODE_WRITE_ATTRIBUTES
) ? " WRITE_ATTRIBUTES" : "",
8225 (action
& KAUTH_VNODE_READ_EXTATTRIBUTES
) ? " READ_EXTATTRIBUTES" : "",
8226 (action
& KAUTH_VNODE_WRITE_EXTATTRIBUTES
) ? " WRITE_EXTATTRIBUTES" : "",
8227 (action
& KAUTH_VNODE_READ_SECURITY
) ? " READ_SECURITY" : "",
8228 (action
& KAUTH_VNODE_WRITE_SECURITY
) ? " WRITE_SECURITY" : "",
8229 (action
& KAUTH_VNODE_CHANGE_OWNER
) ? " CHANGE_OWNER" : "",
8230 (action
& KAUTH_VNODE_NOIMMUTABLE
) ? " (noimmutable)" : "",
8231 vnode_isdir(vp
) ? "directory" : "file",
8232 vp
->v_name
? vp
->v_name
: "<NULL>", action
, vp
, dvp
);
8235 * Extract the control bits from the action, everything else is
8238 noimmutable
= (action
& KAUTH_VNODE_NOIMMUTABLE
) ? 1 : 0;
8239 rights
= action
& ~(KAUTH_VNODE_ACCESS
| KAUTH_VNODE_NOIMMUTABLE
);
8241 if (rights
& KAUTH_VNODE_DELETE
) {
8244 panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
8248 * check to see if we've already authorized the parent
8249 * directory for deletion of its children... if so, we
8250 * can skip a whole bunch of work... we will still have to
8251 * authorize that this specific child can be removed
8253 if (vnode_cache_is_authorized(dvp
, ctx
, KAUTH_VNODE_DELETE_CHILD
) == TRUE
) {
8254 parent_authorized_for_delete_child
= TRUE
;
8262 * Check for read-only filesystems.
8264 if ((rights
& KAUTH_VNODE_WRITE_RIGHTS
) &&
8265 (vp
->v_mount
->mnt_flag
& MNT_RDONLY
) &&
8266 ((vp
->v_type
== VREG
) || (vp
->v_type
== VDIR
) ||
8267 (vp
->v_type
== VLNK
) || (vp
->v_type
== VCPLX
) ||
8268 (rights
& KAUTH_VNODE_DELETE
) || (rights
& KAUTH_VNODE_DELETE_CHILD
))) {
8274 * Check for noexec filesystems.
8276 if ((rights
& KAUTH_VNODE_EXECUTE
) && (vp
->v_type
== VREG
) && (vp
->v_mount
->mnt_flag
& MNT_NOEXEC
)) {
8282 * Handle cases related to filesystems with non-local enforcement.
8283 * This call can return 0, in which case we will fall through to perform a
8284 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
8285 * an appropriate result, at which point we can return immediately.
8287 if ((vp
->v_mount
->mnt_kern_flag
& MNTK_AUTH_OPAQUE
) && vnode_authorize_opaque(vp
, &result
, action
, ctx
)) {
8292 * If the vnode is a namedstream (extended attribute) data vnode (eg.
8293 * a resource fork), *_DATA becomes *_EXTATTRIBUTES.
8295 if (vnode_isnamedstream(vp
)) {
8296 if (rights
& KAUTH_VNODE_READ_DATA
) {
8297 rights
&= ~KAUTH_VNODE_READ_DATA
;
8298 rights
|= KAUTH_VNODE_READ_EXTATTRIBUTES
;
8300 if (rights
& KAUTH_VNODE_WRITE_DATA
) {
8301 rights
&= ~KAUTH_VNODE_WRITE_DATA
;
8302 rights
|= KAUTH_VNODE_WRITE_EXTATTRIBUTES
;
8306 * Point 'vp' to the namedstream's parent for ACL checking
8308 if ((vp
->v_parent
!= NULL
) &&
8309 (vget_internal(vp
->v_parent
, 0, VNODE_NODEAD
| VNODE_DRAINO
) == 0)) {
8311 vcp
->vp
= vp
= vp
->v_parent
;
8315 if (vfs_context_issuser(ctx
)) {
8317 * if we're not asking for execute permissions or modifications,
8318 * then we're done, this action is authorized.
8320 if (!(rights
& (KAUTH_VNODE_EXECUTE
| KAUTH_VNODE_WRITE_RIGHTS
))) {
8328 * Get vnode attributes and extended security information for the vnode
8329 * and directory if required.
8331 * If we're root we only want mode bits and flags for checking
8332 * execute and immutability.
8334 VATTR_WANTED(&va
, va_mode
);
8335 VATTR_WANTED(&va
, va_flags
);
8337 VATTR_WANTED(&va
, va_uid
);
8338 VATTR_WANTED(&va
, va_gid
);
8339 VATTR_WANTED(&va
, va_acl
);
8341 if ((result
= vnode_getattr(vp
, &va
, ctx
)) != 0) {
8342 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp
, result
);
8345 VATTR_WANTED(&va
, va_type
);
8346 VATTR_RETURN(&va
, va_type
, vnode_vtype(vp
));
8349 VATTR_WANTED(&dva
, va_mode
);
8350 VATTR_WANTED(&dva
, va_flags
);
8352 VATTR_WANTED(&dva
, va_uid
);
8353 VATTR_WANTED(&dva
, va_gid
);
8354 VATTR_WANTED(&dva
, va_acl
);
8356 if ((result
= vnode_getattr(vcp
->dvp
, &dva
, ctx
)) != 0) {
8357 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp
, result
);
8360 VATTR_WANTED(&dva
, va_type
);
8361 VATTR_RETURN(&dva
, va_type
, vnode_vtype(vcp
->dvp
));
8364 result
= vnode_attr_authorize_internal(vcp
, vp
->v_mount
, rights
, is_suser
,
8365 &found_deny
, noimmutable
, parent_authorized_for_delete_child
);
8367 if (VATTR_IS_SUPPORTED(&va
, va_acl
) && (va
.va_acl
!= NULL
)) {
8368 kauth_acl_free(va
.va_acl
);
8370 if (VATTR_IS_SUPPORTED(&dva
, va_acl
) && (dva
.va_acl
!= NULL
)) {
8371 kauth_acl_free(dva
.va_acl
);
8379 KAUTH_DEBUG("%p DENIED - auth denied", vp
);
8380 return KAUTH_RESULT_DENY
;
8382 if ((rights
& KAUTH_VNODE_SEARCH
) && found_deny
== FALSE
&& vp
->v_type
== VDIR
) {
8384 * if we were successfully granted the right to search this directory
8385 * and there were NO ACL DENYs for search and the posix permissions also don't
8386 * deny execute, we can synthesize a global right that allows anyone to
8387 * traverse this directory during a pathname lookup without having to
8388 * match the credential associated with this cache of rights.
8390 * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
8391 * only if we actually check ACLs which we don't for root. As
8392 * a workaround, the lookup fast path checks for root.
8394 if (!VATTR_IS_SUPPORTED(&va
, va_mode
) ||
8395 ((va
.va_mode
& (S_IXUSR
| S_IXGRP
| S_IXOTH
)) ==
8396 (S_IXUSR
| S_IXGRP
| S_IXOTH
))) {
8397 vnode_cache_authorized_action(vp
, ctx
, KAUTH_VNODE_SEARCHBYANYONE
);
8406 * Note that this implies that we will allow requests for no rights, as well as
8407 * for rights that we do not recognise. There should be none of these.
8409 KAUTH_DEBUG("%p ALLOWED - auth granted", vp
);
8410 return KAUTH_RESULT_ALLOW
;
void
vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap,
    kauth_action_t action, vfs_context_t ctx)
{
	VATTR_INIT(vap);
	VATTR_WANTED(vap, va_type);
	VATTR_WANTED(vap, va_mode);
	VATTR_WANTED(vap, va_flags);

	if (dvap) {
		if (action & KAUTH_VNODE_DELETE) {
			VATTR_INIT(dvap);
			VATTR_WANTED(dvap, va_type);
			VATTR_WANTED(dvap, va_mode);
			VATTR_WANTED(dvap, va_flags);
		}
	} else if (action & KAUTH_VNODE_DELETE) {
		panic("vnode_attr_authorize_init: KAUTH_VNODE_DELETE test requires a directory");
	}

	if (!vfs_context_issuser(ctx)) {
		VATTR_WANTED(vap, va_uid);
		VATTR_WANTED(vap, va_gid);
		VATTR_WANTED(vap, va_acl);
		if (dvap && (action & KAUTH_VNODE_DELETE)) {
			VATTR_WANTED(dvap, va_uid);
			VATTR_WANTED(dvap, va_gid);
			VATTR_WANTED(dvap, va_acl);
		}
	}
}
8447 vnode_attr_authorize(struct vnode_attr
*vap
, struct vnode_attr
*dvap
, mount_t mp
,
8448 kauth_action_t action
, vfs_context_t ctx
)
8450 struct _vnode_authorize_context auth_context
;
8452 kauth_ace_rights_t rights
;
8454 boolean_t found_deny
;
8455 boolean_t is_suser
= FALSE
;
8458 vcp
= &auth_context
;
8464 vcp
->flags
= vcp
->flags_valid
= 0;
8466 noimmutable
= (action
& KAUTH_VNODE_NOIMMUTABLE
) ? 1 : 0;
8467 rights
= action
& ~(KAUTH_VNODE_ACCESS
| KAUTH_VNODE_NOIMMUTABLE
);
8470 * Check for read-only filesystems.
8472 if ((rights
& KAUTH_VNODE_WRITE_RIGHTS
) &&
8473 mp
&& (mp
->mnt_flag
& MNT_RDONLY
) &&
8474 ((vap
->va_type
== VREG
) || (vap
->va_type
== VDIR
) ||
8475 (vap
->va_type
== VLNK
) || (rights
& KAUTH_VNODE_DELETE
) ||
8476 (rights
& KAUTH_VNODE_DELETE_CHILD
))) {
8482 * Check for noexec filesystems.
8484 if ((rights
& KAUTH_VNODE_EXECUTE
) &&
8485 (vap
->va_type
== VREG
) && mp
&& (mp
->mnt_flag
& MNT_NOEXEC
)) {
8490 if (vfs_context_issuser(ctx
)) {
8492 * if we're not asking for execute permissions or modifications,
8493 * then we're done, this action is authorized.
8495 if (!(rights
& (KAUTH_VNODE_EXECUTE
| KAUTH_VNODE_WRITE_RIGHTS
))) {
8500 if (!VATTR_IS_SUPPORTED(vap
, va_uid
) ||
8501 !VATTR_IS_SUPPORTED(vap
, va_gid
) ||
8502 (mp
&& vfs_extendedsecurity(mp
) && !VATTR_IS_SUPPORTED(vap
, va_acl
))) {
8503 panic("vnode attrs not complete for vnode_attr_authorize\n");
8507 result
= vnode_attr_authorize_internal(vcp
, mp
, rights
, is_suser
,
8508 &found_deny
, noimmutable
, FALSE
);
8510 if (result
== EPERM
) {
int
vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
{
	return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
}

/*
 * Check that the attribute information in vattr can be legally applied to
 * a new file by the context.
 */
static int
vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
8532 int has_priv_suser
, ismember
, defaulted_owner
, defaulted_group
, defaulted_mode
;
8533 uint32_t inherit_flags
;
8537 struct vnode_attr dva
;
8541 if (defaulted_fieldsp
) {
8542 *defaulted_fieldsp
= 0;
8545 defaulted_owner
= defaulted_group
= defaulted_mode
= 0;
8550 * Require that the filesystem support extended security to apply any.
8552 if (!vfs_extendedsecurity(dvp
->v_mount
) &&
8553 (VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
))) {
8559 * Default some fields.
8564 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
8565 * owner takes ownership of all new files.
8567 if ((dmp
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) && (dmp
->mnt_fsowner
!= KAUTH_UID_NONE
)) {
8568 VATTR_SET(vap
, va_uid
, dmp
->mnt_fsowner
);
8569 defaulted_owner
= 1;
8571 if (!VATTR_IS_ACTIVE(vap
, va_uid
)) {
8572 /* default owner is current user */
8573 VATTR_SET(vap
, va_uid
, kauth_cred_getuid(vfs_context_ucred(ctx
)));
8574 defaulted_owner
= 1;
8579 * We need the dvp's va_flags and *may* need the gid of the directory,
8580 * we ask for both here.
8583 VATTR_WANTED(&dva
, va_gid
);
8584 VATTR_WANTED(&dva
, va_flags
);
8585 if ((error
= vnode_getattr(dvp
, &dva
, ctx
)) != 0) {
 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
 * group takes ownership of all new files.
8593 if ((dmp
->mnt_flag
& MNT_IGNORE_OWNERSHIP
) && (dmp
->mnt_fsgroup
!= KAUTH_GID_NONE
)) {
8594 VATTR_SET(vap
, va_gid
, dmp
->mnt_fsgroup
);
8595 defaulted_group
= 1;
8597 if (!VATTR_IS_ACTIVE(vap
, va_gid
)) {
8598 /* default group comes from parent object, fallback to current user */
8599 if (VATTR_IS_SUPPORTED(&dva
, va_gid
)) {
8600 VATTR_SET(vap
, va_gid
, dva
.va_gid
);
8602 VATTR_SET(vap
, va_gid
, kauth_cred_getgid(vfs_context_ucred(ctx
)));
8604 defaulted_group
= 1;
8608 if (!VATTR_IS_ACTIVE(vap
, va_flags
)) {
8609 VATTR_SET(vap
, va_flags
, 0);
8612 /* Determine if SF_RESTRICTED should be inherited from the parent
8614 if (VATTR_IS_SUPPORTED(&dva
, va_flags
)) {
8615 inherit_flags
= dva
.va_flags
& (UF_DATAVAULT
| SF_RESTRICTED
);
8618 /* default mode is everything, masked with current umask */
8619 if (!VATTR_IS_ACTIVE(vap
, va_mode
)) {
8620 VATTR_SET(vap
, va_mode
, ACCESSPERMS
& ~vfs_context_proc(ctx
)->p_fd
->fd_cmask
);
8621 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap
->va_mode
, vfs_context_proc(ctx
)->p_fd
->fd_cmask
);
8624 /* set timestamps to now */
8625 if (!VATTR_IS_ACTIVE(vap
, va_create_time
)) {
8626 nanotime(&vap
->va_create_time
);
8627 VATTR_SET_ACTIVE(vap
, va_create_time
);
8631 * Check for attempts to set nonsensical fields.
8633 if (vap
->va_active
& ~VNODE_ATTR_NEWOBJ
) {
8635 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
8636 vap
->va_active
& ~VNODE_ATTR_NEWOBJ
);
8641 * Quickly check for the applicability of any enforcement here.
8642 * Tests below maintain the integrity of the local security model.
8644 if (vfs_authopaque(dvp
->v_mount
)) {
8649 * We need to know if the caller is the superuser, or if the work is
8650 * otherwise already authorised.
8652 cred
= vfs_context_ucred(ctx
);
8654 /* doing work for the kernel */
8657 has_priv_suser
= vfs_context_issuser(ctx
);
8661 if (VATTR_IS_ACTIVE(vap
, va_flags
)) {
8662 vap
->va_flags
&= ~SF_SYNTHETIC
;
8663 if (has_priv_suser
) {
8664 if ((vap
->va_flags
& (UF_SETTABLE
| SF_SETTABLE
)) != vap
->va_flags
) {
8666 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
8670 if ((vap
->va_flags
& UF_SETTABLE
) != vap
->va_flags
) {
8672 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
8678 /* if not superuser, validate legality of new-item attributes */
8679 if (!has_priv_suser
) {
8680 if (!defaulted_mode
&& VATTR_IS_ACTIVE(vap
, va_mode
)) {
8682 if (vap
->va_mode
& S_ISGID
) {
8683 if ((error
= kauth_cred_ismember_gid(cred
, vap
->va_gid
, &ismember
)) != 0) {
8684 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error
, vap
->va_gid
);
8688 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap
->va_gid
);
8695 if ((vap
->va_mode
& S_ISUID
) && (vap
->va_uid
!= kauth_cred_getuid(cred
))) {
8696 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
8701 if (!defaulted_owner
&& (vap
->va_uid
!= kauth_cred_getuid(cred
))) {
8702 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap
->va_uid
);
8706 if (!defaulted_group
) {
8707 if ((error
= kauth_cred_ismember_gid(cred
, vap
->va_gid
, &ismember
)) != 0) {
8708 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error
, vap
->va_gid
);
8712 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap
->va_gid
);
8718 /* initialising owner/group UUID */
8719 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
8720 if ((error
= kauth_cred_getguid(cred
, &changer
)) != 0) {
8721 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error
);
8722 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
8725 if (!kauth_guid_equal(&vap
->va_uuuid
, &changer
)) {
8726 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
8731 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
8732 if ((error
= kauth_cred_ismember_guid(cred
, &vap
->va_guuid
, &ismember
)) != 0) {
8733 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error
);
8737 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
8744 if (inherit_flags
) {
8745 /* Apply SF_RESTRICTED to the file if its parent directory was
8746 * restricted. This is done at the end so that root is not
8747 * required if this flag is only set due to inheritance. */
8748 VATTR_SET(vap
, va_flags
, (vap
->va_flags
| inherit_flags
));
8750 if (defaulted_fieldsp
) {
8751 if (defaulted_mode
) {
8752 *defaulted_fieldsp
|= VATTR_PREPARE_DEFAULTED_MODE
;
8754 if (defaulted_group
) {
8755 *defaulted_fieldsp
|= VATTR_PREPARE_DEFAULTED_GID
;
8757 if (defaulted_owner
) {
8758 *defaulted_fieldsp
|= VATTR_PREPARE_DEFAULTED_UID
;
8765 * Check that the attribute information in vap can be legally written by the
8768 * Call this when you're not sure about the vnode_attr; either its contents
8769 * have come from an unknown source, or when they are variable.
8771 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
8772 * must be authorized to be permitted to write the vattr.
8775 vnode_authattr(vnode_t vp
, struct vnode_attr
*vap
, kauth_action_t
*actionp
, vfs_context_t ctx
)
8777 struct vnode_attr ova
;
8778 kauth_action_t required_action
;
8779 int error
, has_priv_suser
, ismember
, chowner
, chgroup
, clear_suid
, clear_sgid
;
8788 required_action
= 0;
8792 * Quickly check for enforcement applicability.
8794 if (vfs_authopaque(vp
->v_mount
)) {
8799 * Check for attempts to set nonsensical fields.
8801 if (vap
->va_active
& VNODE_ATTR_RDONLY
) {
8802 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
8808 * We need to know if the caller is the superuser.
8810 cred
= vfs_context_ucred(ctx
);
8811 has_priv_suser
= kauth_cred_issuser(cred
);
8814 * If any of the following are changing, we need information from the old file:
8821 if (VATTR_IS_ACTIVE(vap
, va_uid
) ||
8822 VATTR_IS_ACTIVE(vap
, va_gid
) ||
8823 VATTR_IS_ACTIVE(vap
, va_mode
) ||
8824 VATTR_IS_ACTIVE(vap
, va_uuuid
) ||
8825 VATTR_IS_ACTIVE(vap
, va_guuid
)) {
8826 VATTR_WANTED(&ova
, va_mode
);
8827 VATTR_WANTED(&ova
, va_uid
);
8828 VATTR_WANTED(&ova
, va_gid
);
8829 VATTR_WANTED(&ova
, va_uuuid
);
8830 VATTR_WANTED(&ova
, va_guuid
);
8831 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
8835 * If timestamps are being changed, we need to know who the file is owned
8838 if (VATTR_IS_ACTIVE(vap
, va_create_time
) ||
8839 VATTR_IS_ACTIVE(vap
, va_change_time
) ||
8840 VATTR_IS_ACTIVE(vap
, va_modify_time
) ||
8841 VATTR_IS_ACTIVE(vap
, va_access_time
) ||
8842 VATTR_IS_ACTIVE(vap
, va_backup_time
) ||
8843 VATTR_IS_ACTIVE(vap
, va_addedtime
)) {
8844 VATTR_WANTED(&ova
, va_uid
);
8845 #if 0 /* enable this when we support UUIDs as official owners */
8846 VATTR_WANTED(&ova
, va_uuuid
);
8848 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
8852 * If flags are being changed, we need the old flags.
8854 if (VATTR_IS_ACTIVE(vap
, va_flags
)) {
8855 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
8856 VATTR_WANTED(&ova
, va_flags
);
8860 * If ACLs are being changed, we need the old ACLs.
8862 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
8863 KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
8864 VATTR_WANTED(&ova
, va_acl
);
8868 * If the size is being set, make sure it's not a directory.
8870 if (VATTR_IS_ACTIVE(vap
, va_data_size
)) {
8871 /* size is only meaningful on regular files, don't permit otherwise */
8872 if (!vnode_isreg(vp
)) {
8873 KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
8874 error
= vnode_isdir(vp
) ? EISDIR
: EINVAL
;
8882 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova
.va_active
);
8883 if ((error
= vnode_getattr(vp
, &ova
, ctx
)) != 0) {
8884 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error
);
8889 * Size changes require write access to the file data.
8891 if (VATTR_IS_ACTIVE(vap
, va_data_size
)) {
8892 /* if we can't get the size, or it's different, we need write access */
8893 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
8894 required_action
|= KAUTH_VNODE_WRITE_DATA
;
8898 * Changing timestamps?
8900 * Note that we are only called to authorize user-requested time changes;
8901 * side-effect time changes are not authorized. Authorisation is only
8902 * required for existing files.
8904 * Non-owners are not permitted to change the time on an existing
8905 * file to anything other than the current time.
8907 if (VATTR_IS_ACTIVE(vap
, va_create_time
) ||
8908 VATTR_IS_ACTIVE(vap
, va_change_time
) ||
8909 VATTR_IS_ACTIVE(vap
, va_modify_time
) ||
8910 VATTR_IS_ACTIVE(vap
, va_access_time
) ||
8911 VATTR_IS_ACTIVE(vap
, va_backup_time
) ||
8912 VATTR_IS_ACTIVE(vap
, va_addedtime
)) {
8914 * The owner and root may set any timestamps they like,
8915 * provided that the file is not immutable. The owner still needs
8916 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
8918 if (has_priv_suser
|| vauth_node_owner(&ova
, cred
)) {
8919 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
8920 required_action
|= KAUTH_VNODE_CHECKIMMUTABLE
| KAUTH_VNODE_WRITE_ATTRIBUTES
;
8922 /* just setting the current time? */
8923 if (vap
->va_vaflags
& VA_UTIMES_NULL
) {
8924 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
8925 required_action
|= KAUTH_VNODE_WRITE_ATTRIBUTES
;
8927 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
8935 * Changing file mode?
8937 if (VATTR_IS_ACTIVE(vap
, va_mode
) && VATTR_IS_SUPPORTED(&ova
, va_mode
) && (ova
.va_mode
!= vap
->va_mode
)) {
8938 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova
.va_mode
, vap
->va_mode
);
8941 * Mode changes always have the same basic auth requirements.
8943 if (has_priv_suser
) {
8944 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
8945 required_action
|= KAUTH_VNODE_CHECKIMMUTABLE
;
8947 /* need WRITE_SECURITY */
8948 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
8949 required_action
|= KAUTH_VNODE_WRITE_SECURITY
;
8953 * Can't set the setgid bit if you're not in the group and not root. Have to have
8954 * existing group information in the case we're not setting it right now.
8956 if (vap
->va_mode
& S_ISGID
) {
8957 required_action
|= KAUTH_VNODE_CHECKIMMUTABLE
; /* always required */
8958 if (!has_priv_suser
) {
8959 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
8960 group
= vap
->va_gid
;
8961 } else if (VATTR_IS_SUPPORTED(&ova
, va_gid
)) {
8964 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
8969 * This might be too restrictive; WRITE_SECURITY might be implied by
8970 * membership in this case, rather than being an additional requirement.
8972 if ((error
= kauth_cred_ismember_gid(cred
, group
, &ismember
)) != 0) {
8973 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error
, vap
->va_gid
);
8977 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group
);
8985 * Can't set the setuid bit unless you're root or the file's owner.
8987 if (vap
->va_mode
& S_ISUID
) {
8988 required_action
|= KAUTH_VNODE_CHECKIMMUTABLE
; /* always required */
8989 if (!has_priv_suser
) {
8990 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
8991 owner
= vap
->va_uid
;
8992 } else if (VATTR_IS_SUPPORTED(&ova
, va_uid
)) {
8995 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
8999 if (owner
!= kauth_cred_getuid(cred
)) {
9001 * We could allow this if WRITE_SECURITY is permitted, perhaps.
9003 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
9012 * Validate/mask flags changes. This checks that only the flags in
9013 * the UF_SETTABLE mask are being set, and preserves the flags in
9014 * the SF_SETTABLE case.
9016 * Since flags changes may be made in conjunction with other changes,
9017 * we will ask the auth code to ignore immutability in the case that
9018 * the SF_* flags are not set and we are only manipulating the file flags.
9021 if (VATTR_IS_ACTIVE(vap
, va_flags
)) {
9022 /* compute changing flags bits */
9023 vap
->va_flags
&= ~SF_SYNTHETIC
;
9024 ova
.va_flags
&= ~SF_SYNTHETIC
;
9025 if (VATTR_IS_SUPPORTED(&ova
, va_flags
)) {
9026 fdelta
= vap
->va_flags
^ ova
.va_flags
;
9028 fdelta
= vap
->va_flags
;
9032 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
9033 required_action
|= KAUTH_VNODE_WRITE_SECURITY
;
9035 /* check that changing bits are legal */
9036 if (has_priv_suser
) {
9038 * The immutability check will prevent us from clearing the SF_*
9039 * flags unless the system securelevel permits it, so just check
9040 * for legal flags here.
9042 if (fdelta
& ~(UF_SETTABLE
| SF_SETTABLE
)) {
9044 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
9048 if (fdelta
& ~UF_SETTABLE
) {
9050 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
9055 * If the caller has the ability to manipulate file flags,
9056 * security is not reduced by ignoring them for this operation.
9058 * A more complete test here would consider the 'after' states of the flags
9059 * to determine whether it would permit the operation, but this becomes
9062 * Ignoring immutability is conditional on securelevel; this does not bypass
9063 * the SF_* flags if securelevel > 0.
9065 required_action
|= KAUTH_VNODE_NOIMMUTABLE
;
9070 * Validate ownership information.
9079 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
9080 * support them in general, and will ignore it if/when we try to set it.
9081 * We might want to clear the uid out of vap completely here.
9083 if (VATTR_IS_ACTIVE(vap
, va_uid
)) {
9084 if (VATTR_IS_SUPPORTED(&ova
, va_uid
) && (vap
->va_uid
!= ova
.va_uid
)) {
9085 if (!has_priv_suser
&& (kauth_cred_getuid(cred
) != vap
->va_uid
)) {
KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
9097 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
9098 * support them in general, and will ignore it if/when we try to set it.
9099 * We might want to clear the gid out of vap completely here.
9101 if (VATTR_IS_ACTIVE(vap
, va_gid
)) {
9102 if (VATTR_IS_SUPPORTED(&ova
, va_gid
) && (vap
->va_gid
!= ova
.va_gid
)) {
9103 if (!has_priv_suser
) {
9104 if ((error
= kauth_cred_ismember_gid(cred
, vap
->va_gid
, &ismember
)) != 0) {
9105 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error
, vap
->va_gid
);
9109 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
9110 ova
.va_gid
, vap
->va_gid
);
9121 * Owner UUID being set or changed.
9123 if (VATTR_IS_ACTIVE(vap
, va_uuuid
)) {
9124 /* if the owner UUID is not actually changing ... */
9125 if (VATTR_IS_SUPPORTED(&ova
, va_uuuid
)) {
9126 if (kauth_guid_equal(&vap
->va_uuuid
, &ova
.va_uuuid
)) {
9127 goto no_uuuid_change
;
9131 * If the current owner UUID is a null GUID, check
9132 * it against the UUID corresponding to the owner UID.
9134 if (kauth_guid_equal(&ova
.va_uuuid
, &kauth_null_guid
) &&
9135 VATTR_IS_SUPPORTED(&ova
, va_uid
)) {
9138 if (kauth_cred_uid2guid(ova
.va_uid
, &uid_guid
) == 0 &&
9139 kauth_guid_equal(&vap
->va_uuuid
, &uid_guid
)) {
9140 goto no_uuuid_change
;
9146 * The owner UUID cannot be set by a non-superuser to anything other than
9147 * their own or a null GUID (to "unset" the owner UUID).
9148 * Note that file systems must be prepared to handle the
9149 * null UUID case in a manner appropriate for that file
9152 if (!has_priv_suser
) {
9153 if ((error
= kauth_cred_getguid(cred
, &changer
)) != 0) {
9154 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error
);
9155 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
9158 if (!kauth_guid_equal(&vap
->va_uuuid
, &changer
) &&
9159 !kauth_guid_equal(&vap
->va_uuuid
, &kauth_null_guid
)) {
9160 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
9170 * Group UUID being set or changed.
9172 if (VATTR_IS_ACTIVE(vap
, va_guuid
)) {
9173 /* if the group UUID is not actually changing ... */
9174 if (VATTR_IS_SUPPORTED(&ova
, va_guuid
)) {
9175 if (kauth_guid_equal(&vap
->va_guuid
, &ova
.va_guuid
)) {
9176 goto no_guuid_change
;
9180 * If the current group UUID is a null UUID, check
9181 * it against the UUID corresponding to the group GID.
9183 if (kauth_guid_equal(&ova
.va_guuid
, &kauth_null_guid
) &&
9184 VATTR_IS_SUPPORTED(&ova
, va_gid
)) {
9187 if (kauth_cred_gid2guid(ova
.va_gid
, &gid_guid
) == 0 &&
9188 kauth_guid_equal(&vap
->va_guuid
, &gid_guid
)) {
9189 goto no_guuid_change
;
9195 * The group UUID cannot be set by a non-superuser to anything other than
9196 * one of which they are a member or a null GUID (to "unset"
9198 * Note that file systems must be prepared to handle the
9199 * null UUID case in a manner appropriate for that file
9202 if (!has_priv_suser
) {
9203 if (kauth_guid_equal(&vap
->va_guuid
, &kauth_null_guid
)) {
9205 } else if ((error
= kauth_cred_ismember_guid(cred
, &vap
->va_guuid
, &ismember
)) != 0) {
9206 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error
);
9210 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
9220 * Compute authorisation for group/ownership changes.
9222 if (chowner
|| chgroup
|| clear_suid
|| clear_sgid
) {
9223 if (has_priv_suser
) {
9224 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
9225 required_action
|= KAUTH_VNODE_CHECKIMMUTABLE
;
9228 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
9229 required_action
|= KAUTH_VNODE_TAKE_OWNERSHIP
;
9231 if (chgroup
&& !chowner
) {
9232 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
9233 required_action
|= KAUTH_VNODE_WRITE_SECURITY
;
9238 * clear set-uid and set-gid bits. POSIX only requires this for
9239 * non-privileged processes but we do it even for root.
9241 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
9242 newmode
= vap
->va_mode
;
9243 } else if (VATTR_IS_SUPPORTED(&ova
, va_mode
)) {
9244 newmode
= ova
.va_mode
;
9246 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
9250 /* chown always clears setuid/gid bits. An exception is made for
9251 * setattrlist which can set both at the same time: <uid, gid, mode> on a file:
9252 * setattrlist is allowed to set the new mode on the file and change (chown)
9255 if (newmode
& (S_ISUID
| S_ISGID
)) {
9256 if (!VATTR_IS_ACTIVE(vap
, va_mode
)) {
9257 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o",
9258 newmode
, newmode
& ~(S_ISUID
| S_ISGID
));
9259 newmode
&= ~(S_ISUID
| S_ISGID
);
9261 VATTR_SET(vap
, va_mode
, newmode
);
9266 * Authorise changes in the ACL.
9268 if (VATTR_IS_ACTIVE(vap
, va_acl
)) {
9269 /* no existing ACL */
9270 if (!VATTR_IS_ACTIVE(&ova
, va_acl
) || (ova
.va_acl
== NULL
)) {
9272 if (vap
->va_acl
!= NULL
) {
9273 required_action
|= KAUTH_VNODE_WRITE_SECURITY
;
9274 KAUTH_DEBUG("CHMOD - adding ACL");
9277 /* removing an existing ACL */
9278 } else if (vap
->va_acl
== NULL
) {
9279 required_action
|= KAUTH_VNODE_WRITE_SECURITY
;
9280 KAUTH_DEBUG("CHMOD - removing ACL");
9282 /* updating an existing ACL */
9284 if (vap
->va_acl
->acl_entrycount
!= ova
.va_acl
->acl_entrycount
) {
9285 /* entry count changed, must be different */
9286 required_action
|= KAUTH_VNODE_WRITE_SECURITY
;
9287 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
9288 } else if (vap
->va_acl
->acl_entrycount
> 0) {
9289 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
9290 if (memcmp(&vap
->va_acl
->acl_ace
[0], &ova
.va_acl
->acl_ace
[0],
9291 sizeof(struct kauth_ace
) * vap
->va_acl
->acl_entrycount
)) {
9292 required_action
|= KAUTH_VNODE_WRITE_SECURITY
;
9293 KAUTH_DEBUG("CHMOD - changing ACL entries");
9300 * Other attributes that require authorisation.
9302 if (VATTR_IS_ACTIVE(vap
, va_encoding
)) {
9303 required_action
|= KAUTH_VNODE_WRITE_ATTRIBUTES
;
9307 if (VATTR_IS_SUPPORTED(&ova
, va_acl
) && (ova
.va_acl
!= NULL
)) {
9308 kauth_acl_free(ova
.va_acl
);
9311 *actionp
= required_action
;
static int
setlocklocal_callback(struct vnode *vp, __unused void *cargs)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VLOCKLOCAL;
	vnode_unlock(vp);

	return VNODE_RETURNED;
}

void
vfs_setlocklocal(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
	mount_unlock(mp);

	/*
	 * The number of active vnodes is expected to be
	 * very small when vfs_setlocklocal is invoked.
	 */
	vnode_iterate(mp, 0, setlocklocal_callback, NULL);
}

void
vfs_setcompoundopen(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
	mount_unlock(mp);
}

void
vnode_setswapmount(vnode_t vp)
{
	mount_lock(vp->v_mount);
	vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
	mount_unlock(vp->v_mount);
}

int64_t
vnode_getswappin_avail(vnode_t vp)
{
	int64_t max_swappin_avail = 0;

	mount_lock(vp->v_mount);
	if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) {
		max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
	}
	mount_unlock(vp->v_mount);

	return max_swappin_avail;
}
void
vn_setunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISUNION;
	vnode_unlock(vp);
}

void
vn_checkunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	while ((vp->v_flag & VISUNION) == VISUNION) {
		msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
	}
	vnode_unlock(vp);
}

void
vn_clearunionwait(vnode_t vp, int locked)
{
	if (!locked) {
		vnode_lock_spin(vp);
	}
	if ((vp->v_flag & VISUNION) == VISUNION) {
		vp->v_flag &= ~VISUNION;
		wakeup((caddr_t)&vp->v_flag);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
9407 vnode_materialize_dataless_file(vnode_t vp
, uint64_t op_type
)
9411 /* Swap files are special; ignore them */
9412 if (vnode_isswap(vp
)) {
9416 error
= resolve_nspace_item(vp
,
9417 op_type
| NAMESPACE_HANDLER_NSPACE_EVENT
);
9420 * The file resolver owns the logic about what error to return
9421 * to the caller. We only need to handle a couple of special
9424 if (error
== EJUSTRETURN
) {
9426 * The requesting process is allowed to interact with
9427 * dataless objects. Make a couple of sanity-checks
9428 * here to ensure the action makes sense.
9431 case NAMESPACE_HANDLER_WRITE_OP
:
9432 case NAMESPACE_HANDLER_TRUNCATE_OP
:
9433 case NAMESPACE_HANDLER_RENAME_OP
:
9435 * This handles the case of the resolver itself
9436 * writing data to the file (or throwing it
9441 case NAMESPACE_HANDLER_READ_OP
:
9443 * This handles the case of the resolver needing
9444 * to look up inside of a dataless directory while
9445 * it's in the process of materializing it (for
9446 * example, creating files or directories).
9448 error
= (vnode_vtype(vp
) == VDIR
) ? 0 : EBADF
;
9460 * Removes orphaned apple double files during a rmdir
9462 * 1. vnode_suspend().
9463 * 2. Call VNOP_READDIR() till the end of directory is reached.
9464 * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY.
9465 * 4. Continue (2) and (3) till end of directory is reached.
9466 * 5. If all the entries in the directory were files with "._" name, delete all the files.
9468 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
9472 rmdir_remove_orphaned_appleDouble(vnode_t vp
, vfs_context_t ctx
, int * restart_flag
)
9474 #define UIO_BUFF_SIZE 2048
9476 int eofflag
, siz
= UIO_BUFF_SIZE
, nentries
= 0;
9477 int open_flag
= 0, full_erase_flag
= 0;
9478 char uio_buf
[UIO_SIZEOF(1)];
9485 error
= vnode_suspend(vp
);
9488 * restart_flag is set so that the calling rmdir sleeps and resets
9490 if (error
== EBUSY
) {
9498 * Prevent dataless fault materialization while we have
9499 * a suspended vnode.
9501 uthread_t ut
= get_bsdthread_info(current_thread());
9502 bool saved_nodatalessfaults
=
9503 (ut
->uu_flag
& UT_NSPACE_NODATALESSFAULTS
) ? true : false;
9504 ut
->uu_flag
|= UT_NSPACE_NODATALESSFAULTS
;
9509 MALLOC(rbuf
, caddr_t
, siz
, M_TEMP
, M_WAITOK
);
9511 auio
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_READ
,
9512 &uio_buf
[0], sizeof(uio_buf
));
9514 if (!rbuf
|| !auio
) {
9519 uio_setoffset(auio
, 0);
9523 if ((error
= VNOP_OPEN(vp
, FREAD
, ctx
))) {
9530 * First pass checks if all files are appleDouble files.
9534 siz
= UIO_BUFF_SIZE
;
9535 uio_reset(auio
, uio_offset(auio
), UIO_SYSSPACE
, UIO_READ
);
9536 uio_addiov(auio
, CAST_USER_ADDR_T(rbuf
), UIO_BUFF_SIZE
);
9538 if ((error
= VNOP_READDIR(vp
, auio
, 0, &eofflag
, &nentries
, ctx
))) {
9542 if (uio_resid(auio
) != 0) {
9543 siz
-= uio_resid(auio
);
9547 * Iterate through directory
9549 dir_pos
= (void*) rbuf
;
9550 dir_end
= (void*) (rbuf
+ siz
);
9551 dp
= (struct dirent
*) (dir_pos
);
9553 if (dir_pos
== dir_end
) {
9557 while (dir_pos
< dir_end
) {
9559 * Check for . and .. as well as directories
9561 if (dp
->d_ino
!= 0 &&
9562 !((dp
->d_namlen
== 1 && dp
->d_name
[0] == '.') ||
9563 (dp
->d_namlen
== 2 && dp
->d_name
[0] == '.' && dp
->d_name
[1] == '.'))) {
9565 * Check for irregular files and ._ files
9566 * If there is a ._._ file abort the op
9568 if (dp
->d_namlen
< 2 ||
9569 strncmp(dp
->d_name
, "._", 2) ||
9570 (dp
->d_namlen
>= 4 && !strncmp(&(dp
->d_name
[2]), "._", 2))) {
9575 dir_pos
= (void*) ((uint8_t*)dir_pos
+ dp
->d_reclen
);
9576 dp
= (struct dirent
*)dir_pos
;
9580 * workaround for HFS/NFS setting eofflag before end of file
9582 if (vp
->v_tag
== VT_HFS
&& nentries
> 2) {
9586 if (vp
->v_tag
== VT_NFS
) {
9587 if (eofflag
&& !full_erase_flag
) {
9588 full_erase_flag
= 1;
9590 uio_reset(auio
, 0, UIO_SYSSPACE
, UIO_READ
);
9591 } else if (!eofflag
&& full_erase_flag
) {
9592 full_erase_flag
= 0;
9597 * If we've made it here all the files in the dir are ._ files.
9598 * We can delete the files even though the node is suspended
9599 * because we are the owner of the file.
9602 uio_reset(auio
, 0, UIO_SYSSPACE
, UIO_READ
);
9604 full_erase_flag
= 0;
9607 siz
= UIO_BUFF_SIZE
;
9608 uio_reset(auio
, uio_offset(auio
), UIO_SYSSPACE
, UIO_READ
);
9609 uio_addiov(auio
, CAST_USER_ADDR_T(rbuf
), UIO_BUFF_SIZE
);
9611 error
= VNOP_READDIR(vp
, auio
, 0, &eofflag
, &nentries
, ctx
);
9617 if (uio_resid(auio
) != 0) {
9618 siz
-= uio_resid(auio
);
9622 * Iterate through directory
9624 dir_pos
= (void*) rbuf
;
9625 dir_end
= (void*) (rbuf
+ siz
);
9626 dp
= (struct dirent
*) dir_pos
;
9628 if (dir_pos
== dir_end
) {
9632 while (dir_pos
< dir_end
) {
9634 * Check for . and .. as well as directories
9636 if (dp
->d_ino
!= 0 &&
9637 !((dp
->d_namlen
== 1 && dp
->d_name
[0] == '.') ||
9638 (dp
->d_namlen
== 2 && dp
->d_name
[0] == '.' && dp
->d_name
[1] == '.'))
9640 error
= unlink1(ctx
, vp
,
9641 CAST_USER_ADDR_T(dp
->d_name
), UIO_SYSSPACE
,
9642 VNODE_REMOVE_SKIP_NAMESPACE_EVENT
|
9643 VNODE_REMOVE_NO_AUDIT_PATH
);
9645 if (error
&& error
!= ENOENT
) {
9649 dir_pos
= (void*) ((uint8_t*)dir_pos
+ dp
->d_reclen
);
9650 dp
= (struct dirent
*)dir_pos
;
9654 * workaround for HFS/NFS setting eofflag before end of file
9656 if (vp
->v_tag
== VT_HFS
&& nentries
> 2) {
9660 if (vp
->v_tag
== VT_NFS
) {
9661 if (eofflag
&& !full_erase_flag
) {
9662 full_erase_flag
= 1;
9664 uio_reset(auio
, 0, UIO_SYSSPACE
, UIO_READ
);
9665 } else if (!eofflag
&& full_erase_flag
) {
9666 full_erase_flag
= 0;
9676 VNOP_CLOSE(vp
, FREAD
, ctx
);
9684 if (saved_nodatalessfaults
== false) {
9685 ut
->uu_flag
&= ~UT_NSPACE_NODATALESSFAULTS
;
void
lock_vnode_and_post(vnode_t vp, int kevent_num)
{
	/* Only take the lock if there's something there! */
	if (vp->v_knotes.slh_first != NULL) {
		vnode_lock(vp);
		KNOTE(&vp->v_knotes, kevent_num);
		vnode_unlock(vp);
	}
}
9705 void panic_print_vnodes(void);
9707 /* define PANIC_PRINTS_VNODES only if investigation is required. */
9708 #ifdef PANIC_PRINTS_VNODES
9711 __vtype(uint16_t vtype
)
9740 * build a path from the bottom up
9741 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
9744 __vpath(vnode_t vp
, char *str
, int len
, int depth
)
9753 /* str + len is the start of the string we created */
9758 /* follow mount vnodes to get the full path */
9759 if ((vp
->v_flag
& VROOT
)) {
9760 if (vp
->v_mount
!= NULL
&& vp
->v_mount
->mnt_vnodecovered
) {
9761 return __vpath(vp
->v_mount
->mnt_vnodecovered
,
9762 str
, len
, depth
+ 1);
9768 vnm_len
= strlen(src
);
9769 if (vnm_len
> len
) {
9770 /* truncate the name to fit in the string */
9771 src
+= (vnm_len
- len
);
9775 /* start from the back and copy just characters (no NULLs) */
9777 /* this will chop off leaf path (file) names */
9779 dst
= str
+ len
- vnm_len
;
9780 memcpy(dst
, src
, vnm_len
);
9786 if (vp
->v_parent
&& len
> 1) {
9787 /* follow parents up the chain */
9790 return __vpath(vp
->v_parent
, str
, len
, depth
+ 1);
9796 #define SANE_VNODE_PRINT_LIMIT 5000
9798 panic_print_vnodes(void)
9807 paniclog_append_noflush("\n***** VNODES *****\n"
9808 "TYPE UREF ICNT PATH\n");
9810 /* NULL-terminate the path name */
9811 vname
[sizeof(vname
) - 1] = '\0';
9814 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
9816 TAILQ_FOREACH(mnt
, &mountlist
, mnt_list
) {
9817 if (!ml_validate_nofault((vm_offset_t
)mnt
, sizeof(mount_t
))) {
9818 paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n",
9823 TAILQ_FOREACH(vp
, &mnt
->mnt_vnodelist
, v_mntvnodes
) {
9824 if (!ml_validate_nofault((vm_offset_t
)vp
, sizeof(vnode_t
))) {
9825 paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n",
9826 &mnt
->mnt_vnodelist
, vp
);
9830 if (++nvnodes
> SANE_VNODE_PRINT_LIMIT
) {
9833 type
= __vtype(vp
->v_type
);
9834 nm
= __vpath(vp
, vname
, sizeof(vname
) - 1, 0);
9835 paniclog_append_noflush("%s %0d %0d %s\n",
9836 type
, vp
->v_usecount
, vp
->v_iocount
, nm
);
9841 #else /* !PANIC_PRINTS_VNODES */
9843 panic_print_vnodes(void)
9852 record_vp(vnode_t vp
, int count
)
9857 if (vp
->v_resolve
) {
9861 if ((vp
->v_flag
& VSYSTEM
)) {
9865 ut
= get_bsdthread_info(current_thread());
9866 ut
->uu_iocount
+= count
;
9869 if (ut
->uu_vpindex
< 32) {
9870 OSBacktrace((void **)&ut
->uu_pcs
[ut
->uu_vpindex
][0], 10);
9872 ut
->uu_vps
[ut
->uu_vpindex
] = vp
;
#define TRIG_DEBUG 0

#if TRIG_DEBUG
#define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
#else
#define TRIG_LOG(...)
#endif

/*
 * Resolver result functions
 */

resolver_result_t
vfs_resolver_result(uint32_t seq, enum resolver_status stat, int aux)
{
	/*
	 * |<---   32   --->|<---  28  --->|<- 4 ->|
	 *      sequence        auxiliary    status
	 */
	return (((uint64_t)seq) << 32) |
	       (((uint64_t)(aux & 0x0fffffff)) << 4) |
	       (uint64_t)(stat & 0x0000000F);
}

enum resolver_status
vfs_resolver_status(resolver_result_t result)
{
	/* lower 4 bits is status */
	return result & 0x0000000F;
}

uint32_t
vfs_resolver_sequence(resolver_result_t result)
{
	/* upper 32 bits is sequence */
	return (uint32_t)(result >> 32);
}

int
vfs_resolver_auxiliary(resolver_result_t result)
{
	/* 28 bits of auxiliary */
	return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
}
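/*
 * Illustrative sketch (not part of the original source): a round trip through
 * the packing helpers above. The sequence, status and auxiliary values are
 * arbitrary example inputs; the assertions simply restate the bit layout
 * documented in vfs_resolver_result().
 */
#if 0
static void
vfs_resolver_result_example(void)
{
	/* pack sequence 7, status RESOLVER_RESOLVED, auxiliary value 42 */
	resolver_result_t result = vfs_resolver_result(7, RESOLVER_RESOLVED, 42);

	/* the accessors recover exactly what was packed */
	assert(vfs_resolver_sequence(result) == 7);
	assert(vfs_resolver_status(result) == RESOLVER_RESOLVED);
	assert(vfs_resolver_auxiliary(result) == 42);
}
#endif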
/*
 * Call in for resolvers to update vnode trigger state
 */
int
vnode_trigger_update(vnode_t vp, resolver_result_t result)
{
	vnode_resolve_t rp;
	uint32_t seq;
	enum resolver_status stat;

	if (vp->v_resolve == NULL) {
		return EINVAL;
	}

	stat = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
		return EINVAL;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	if (seq > rp->vr_lastseq) {
		if (stat == RESOLVER_RESOLVED) {
			rp->vr_flags |= VNT_RESOLVED;
		} else {
			rp->vr_flags &= ~VNT_RESOLVED;
		}

		rp->vr_lastseq = seq;
	}

	lck_mtx_unlock(&rp->vr_lock);

	return 0;
}
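/*
 * Usage sketch (hypothetical names): a filesystem acting as an external
 * resolver reports that it has mounted something on a trigger vnode by
 * building a fresh result and calling vnode_trigger_update().  The sequence
 * counter below is a hypothetical per-resolver counter:
 *
 *	static uint32_t my_seq;
 *
 *	resolver_result_t res = vfs_resolver_result(++my_seq,
 *	    RESOLVER_RESOLVED, 0);
 *	(void) vnode_trigger_update(trigger_vp, res);
 */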
static int
vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
{
	int error = 0;

	vnode_lock_spin(vp);
	if (vp->v_resolve != NULL) {
		vnode_unlock(vp);
		return EINVAL;
	}
	vp->v_resolve = rp;
	vnode_unlock(vp);

	if (ref) {
		error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
		if (error != 0) {
			panic("VNODE_REF_FORCE didn't help...");
		}
	}

	return error;
}
/*
 * VFS internal interfaces for vnode triggers
 *
 * vnode must already have an io count on entry
 * v_resolve is stable when io count is non-zero
 */
static int
vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
{
	vnode_resolve_t rp;
	int result;
	char byte;

	/* minimum pointer test (debugging) */
	if (tinfo->vnt_data) {
		byte = *((char *)tinfo->vnt_data);
	}

	MALLOC(rp, vnode_resolve_t, sizeof(*rp), M_TEMP, M_WAITOK);

	lck_mtx_init(&rp->vr_lock, trigger_vnode_lck_grp, trigger_vnode_lck_attr);

	rp->vr_resolve_func = tinfo->vnt_resolve_func;
	rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
	rp->vr_rearm_func = tinfo->vnt_rearm_func;
	rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
	rp->vr_data = tinfo->vnt_data;
	rp->vr_lastseq = 0;
	rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
	if (external) {
		rp->vr_flags |= VNT_EXTERNAL;
	}

	result = vnode_resolver_attach(vp, rp, external);
	if (result != 0) {
		lck_mtx_destroy(&rp->vr_lock, trigger_vnode_lck_grp);
		FREE(rp, M_TEMP);
		return result;
	}

	if (mp) {
		OSAddAtomic(1, &mp->mnt_numtriggers);
	}

	return result;
}
static void
vnode_resolver_release(vnode_resolve_t rp)
{
	/*
	 * Give them a chance to free any private data
	 */
	if (rp->vr_data && rp->vr_reclaim_func) {
		rp->vr_reclaim_func(NULLVP, rp->vr_data);
	}

	lck_mtx_destroy(&rp->vr_lock, trigger_vnode_lck_grp);
	FREE(rp, M_TEMP);
}
/* Called after the vnode has been drained */
static void
vnode_resolver_detach(vnode_t vp)
{
	vnode_resolve_t rp;
	mount_t mp;

	mp = vnode_mount(vp);

	vnode_lock(vp);
	rp = vp->v_resolve;
	vp->v_resolve = NULL;
	vnode_unlock(vp);

	if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
		vnode_rele_ext(vp, O_EVTONLY, 1);
	}

	vnode_resolver_release(rp);

	/* Keep count of active trigger vnodes per mount */
	OSAddAtomic(-1, &mp->mnt_numtriggers);
}
static void
vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_rearm_func == NULL) ||
	    (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
		return;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/*
	 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
	 */
	if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	/* Check if this vnode is already armed */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	lck_mtx_unlock(&rp->vr_lock);

	result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);
}
static int
vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	enum path_operation op;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/* Only trigger on topmost vnodes */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_resolve_func == NULL) ||
	    (vp->v_mountedhere != NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if (rp->vr_flags & VNT_RESOLVED) {
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	lck_mtx_unlock(&rp->vr_lock);

	if ((rp->vr_flags & VNT_KERN_RESOLVE) == 0) {
		/*
		 * VNT_KERN_RESOLVE indicates this trigger has no parameters
		 * at the discretion of the accessing process other than
		 * the act of access. All other triggers must be checked
		 */
		int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
		if (rv != 0) {
			return rv;
		}
	}

	/*
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * is there any way to know this???
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 *
	 * XXX - should we use ISLASTCN to pick the op value??? Perhaps only leafs should
	 * get the richer set and non-leafs should get generic OP_LOOKUP? TBD
	 */
	op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op : OP_LOOKUP;

	result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_RESOLVED) {
			rp->vr_flags |= VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
static int
vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		printf("vnode_trigger_unresolve: not currently resolved\n");
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	rp->vr_flags |= VNT_VFS_UNMOUNTED;

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 */
	result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
static boolean_t
triggerisdescendant(mount_t mp, mount_t rmp)
{
	boolean_t match = FALSE;

	/*
	 * walk up vnode covered chain looking for a match
	 */
	name_cache_lock_shared();

	while (1) {
		vnode_t vp;

		/* did we encounter "/" ? */
		if (mp->mnt_flag & MNT_ROOTFS) {
			break;
		}

		vp = mp->mnt_vnodecovered;
		if (vp == NULLVP) {
			break;
		}

		mp = vp->v_mount;
		if (mp == rmp) {
			match = TRUE;
			break;
		}
	}

	name_cache_unlock();

	return match;
}
struct trigger_unmount_info {
	vfs_context_t	ctx;
	mount_t		top_mp;
	vnode_t		trigger_vp;
	mount_t		trigger_mp;
	uint32_t	trigger_vid;
	int		flags;
};

static int
trigger_unmount_callback(mount_t mp, void * arg)
{
	struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
	boolean_t mountedtrigger = FALSE;

	/*
	 * When we encounter the top level mount we're done
	 */
	if (mp == infop->top_mp) {
		return VFS_RETURNED_DONE;
	}

	if ((mp->mnt_vnodecovered == NULL) ||
	    (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
		return VFS_RETURNED;
	}

	if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
	    (mp->mnt_vnodecovered->v_resolve != NULL) &&
	    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
		mountedtrigger = TRUE;
	}
	vnode_put(mp->mnt_vnodecovered);

	/*
	 * When we encounter a mounted trigger, check if it's under the top level mount
	 */
	if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) {
		return VFS_RETURNED;
	}

	/*
	 * Process any pending nested mount (now that it's not referenced)
	 */
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
		vnode_t vp = infop->trigger_vp;
		int error;

		infop->trigger_vp = NULLVP;

		if (mp == vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: unexpected match '%s'\n",
			    mp->mnt_vfsstat.f_mntonname);
			return VFS_RETURNED;
		}
		if (infop->trigger_mp != vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
			    infop->trigger_mp, vp->v_mountedhere);
			goto savenext;
		}

		error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
		vnode_put(vp);
		if (error) {
			printf("unresolving: '%s', err %d\n",
			    vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
			    "???", error);
			return VFS_RETURNED_DONE; /* stop iteration on errors */
		}
	}

savenext:
	/*
	 * We can't call resolver here since we hold a mount iter
	 * ref on mp so save its covered vp for later processing
	 */
	infop->trigger_vp = mp->mnt_vnodecovered;
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithref(infop->trigger_vp) == 0)) {
		if (infop->trigger_vp->v_mountedhere == mp) {
			infop->trigger_vid = infop->trigger_vp->v_id;
			infop->trigger_mp = mp;
		}
		vnode_put(infop->trigger_vp);
	}

	return VFS_RETURNED;
}
/*
 * Attempt to unmount any trigger mounts nested underneath a mount.
 * This is a best effort attempt and no retries are performed here.
 *
 * Note: mp->mnt_rwlock is held exclusively on entry (so be careful)
 */
void
vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
{
	struct trigger_unmount_info info;

	/* Must have trigger vnodes */
	if (mp->mnt_numtriggers == 0) {
		return;
	}
	/* Avoid recursive requests (by checking covered vnode) */
	if ((mp->mnt_vnodecovered != NULL) &&
	    (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
		boolean_t recursive = FALSE;

		if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
		    (mp->mnt_vnodecovered->v_resolve != NULL) &&
		    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
			recursive = TRUE;
		}
		vnode_put(mp->mnt_vnodecovered);
		if (recursive) {
			return;
		}
	}

	/*
	 * Attempt to unmount any nested trigger mounts (best effort)
	 */
	info.ctx = ctx;
	info.top_mp = mp;
	info.trigger_vp = NULLVP;
	info.trigger_vid = 0;
	info.trigger_mp = NULL;
	info.flags = flags;

	(void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);

	/*
	 * Process remaining nested mount (now that it's not referenced)
	 */
	if ((info.trigger_vp != NULLVP) &&
	    (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
		vnode_t vp = info.trigger_vp;

		if (info.trigger_mp == vp->v_mountedhere) {
			(void) vnode_trigger_unresolve(vp, flags, ctx);
		}
		vnode_put(vp);
	}
}
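/*
 * Illustrative scenario (hypothetical paths): given a trigger hierarchy such as
 *
 *	/net                 <- top_mp being unmounted
 *	/net/host            <- trigger mount A (VNT_RESOLVED)
 *	/net/host/share      <- trigger mount B (VNT_RESOLVED)
 *
 * the VFS_ITERATE_TAIL_FIRST pass visits newer (deeper) mounts first, so B is
 * unresolved before A, and the last covered vnode is handed back through
 * trigger_unmount_info to be processed once the mount iteration reference is
 * dropped.
 */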
int
vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
{
	struct nameidata nd;
	int res;
	vnode_t rvp, vp;
	struct vnode_trigger_param vtp;

	/*
	 * Must be called for trigger callback, wherein rwlock is held
	 */
	lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);

	TRIG_LOG("Adding trigger at %s\n", relpath);
	TRIG_LOG("Trying VFS_ROOT\n");

	/*
	 * We do a lookup starting at the root of the mountpoint, unwilling
	 * to cross into other mountpoints.
	 */
	res = VFS_ROOT(mp, &rvp, ctx);
	if (res != 0) {
		goto out;
	}

	TRIG_LOG("Trying namei\n");

	NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(relpath), ctx);
	nd.ni_dvp = rvp;
	res = namei(&nd);
	if (res != 0) {
		vnode_put(rvp);
		goto out;
	}

	vp = nd.ni_vp;
	nameidone(&nd);
	vnode_put(rvp);

	TRIG_LOG("Trying vnode_resolver_create()\n");

	/*
	 * Set up blob. vnode_create() takes a larger structure
	 * with creation info, and we needed something different
	 * for this case. One needs to win, or we need to munge both;
	 * vnode_create() wins.
	 */
	bzero(&vtp, sizeof(vtp));
	vtp.vnt_resolve_func = vtip->vti_resolve_func;
	vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
	vtp.vnt_rearm_func = vtip->vti_rearm_func;
	vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
	vtp.vnt_data = vtip->vti_data;
	vtp.vnt_flags = vtip->vti_flags;

	res = vnode_resolver_create(mp, vp, &vtp, TRUE);
	vnode_put(vp);
out:
	TRIG_LOG("Returning %d\n", res);
	return res;
}
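/*
 * Usage sketch (hypothetical callback names): a filesystem that wants a
 * trigger at a path relative to its own root would, from its trigger
 * callback (where mnt_rwlock is already held), fill in a vnode_trigger_info
 * and call vfs_addtrigger():
 *
 *	struct vnode_trigger_info vti;
 *
 *	bzero(&vti, sizeof(vti));
 *	vti.vti_resolve_func = my_fs_resolve;      // hypothetical
 *	vti.vti_unresolve_func = my_fs_unresolve;  // hypothetical
 *	vti.vti_data = my_fs_private_data;         // hypothetical
 *	error = vfs_addtrigger(mp, "mirrors/peer", &vti, ctx);
 */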
#endif /* CONFIG_TRIGGERS */
long
kdebug_vnode(vnode_t vp)
{
	return VM_KERNEL_ADDRPERM(vp);
}

static int flush_cache_on_write = 0;
SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write,
    CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0,
    "always flush the drive cache on writes to uncached files");
int
vnode_should_flush_after_write(vnode_t vp, int ioflag)
{
	return flush_cache_on_write
	       && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp));
}
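/*
 * Usage sketch (hypothetical surrounding code): a filesystem write path would
 * typically consult this helper after the data has been written and issue a
 * device cache flush when it returns non-zero, for example via VNOP_IOCTL
 * with DKIOCSYNCHRONIZECACHE on the backing device vnode:
 *
 *	if (vnode_should_flush_after_write(vp, ioflag)) {
 *		(void) VNOP_IOCTL(devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, ctx);
 *	}
 */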
/*
 * sysctl for use by disk I/O tracing tools to get the list of existing
 * vnodes' paths
 */

struct vnode_trace_paths_context {
	uint64_t count;
	long path[MAXPATHLEN / sizeof(long) + 1]; /* + 1 in case sizeof (long) does not divide MAXPATHLEN */
};

static int
vnode_trace_path_callback(struct vnode *vp, void *arg)
{
	int len, rv;
	struct vnode_trace_paths_context *ctx;

	ctx = arg;

	len = sizeof(ctx->path);
	rv = vn_getpath(vp, (char *)ctx->path, &len);
	/* vn_getpath() NUL-terminates, and len includes the NUL */

	if (rv == 0) {
		kdebug_vfs_lookup(ctx->path, len, vp,
		    KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT);

		if (++(ctx->count) == 1000) {
			thread_yield_to_preemption();
			ctx->count = 0;
		}
	}

	return VNODE_RETURNED;
}
static int
vfs_trace_paths_callback(mount_t mp, void *arg)
{
	if (mp->mnt_flag & MNT_LOCAL) {
		vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg);
	}

	return VFS_RETURNED;
}
static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS {
	struct vnode_trace_paths_context ctx;

	(void)oidp;
	(void)arg1;
	(void)arg2;
	(void)req;

	if (!kauth_cred_issuser(kauth_cred_get())) {
		return EPERM;
	}

	if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) {
		return EINVAL;
	}

	bzero(&ctx, sizeof(struct vnode_trace_paths_context));

	vfs_iterate(0, vfs_trace_paths_callback, &ctx);

	return 0;
}

SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");