/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf_internal.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/uio_internal.h>
#include <sys/domain.h>
#include <sys/syslog.h>
#include <sys/ubc_internal.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/kern_memorystatus.h>
#include <sys/lockf.h>
#include <miscfs/fifofs/fifo.h>

#include <machine/spl.h>

#include <kern/assert.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>

#include <kern/kalloc.h>	/* kalloc()/kfree() */
#include <kern/clock.h>		/* delay_for_interval() */
#include <libkern/OSAtomic.h>	/* OSAddAtomic() */
#include <console/video_console.h>

#include <libkern/OSDebug.h>

#include <vm/vm_protos.h>	/* vnode_pager_vrele() */

#include <security/mac_framework.h>
#define PANIC_PRINTS_VNODES

extern lck_grp_t *vnode_lck_grp;
extern lck_attr_t *vnode_lck_attr;

extern lck_grp_t *trigger_vnode_lck_grp;
extern lck_attr_t *trigger_vnode_lck_attr;

extern lck_mtx_t * mnt_list_mtx_lock;
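/*
 * Conversion tables: iftovt_tab maps the IFMT file-type bits of an
 * inode/mode_t to the corresponding vnode type, and vttoif_tab maps a
 * vnode type back to its IFMT bits.
 */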
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern void	memory_object_mark_used(
	memory_object_control_t		control);

extern void	memory_object_mark_unused(
	memory_object_control_t		control,
	boolean_t			rage);

extern void	memory_object_mark_io_tracking(
	memory_object_control_t		control);

/* XXX next prototype should be from <nfs/nfs.h> */
extern int	nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);

/* XXX next prototype should be from <libsa/stdlib.h> but conflicts libkern */
__private_extern__ void qsort(
	void *array, size_t nmembers, size_t member_size,
	int (*)(const void *, const void *));

extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
__private_extern__ void vntblinit(void);
__private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
			unsigned int val2);
__private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);

extern int system_inshutdown;
static void vnode_list_add(vnode_t);
static void vnode_async_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);

static void vnode_abort_advlocks(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static void vnode_reclaim_internal(vnode_t, int, int, int);

static void vnode_dropiocount(vnode_t);

static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int  vnode_reload(vnode_t);
static int  vnode_isinuse_locked(vnode_t, int, int);

static int unmount_callback(mount_t, __unused void *);

static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int);
static void vnode_iterate_setup(mount_t);
int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
static mount_t vfs_getvfs_locked(fsid_t *);
static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
	struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);

errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);

static void record_vp(vnode_t vp, int count);

static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
static void vnode_resolver_detach(vnode_t);
TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list;	/* vnode dead list */
TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;

TAILQ_HEAD(ragelst, vnode) vnode_rage_list;	/* vnode rapid age list */
struct timeval rage_tv;
int	rage_limit = 0;

#define RAGE_LIMIT_MIN	100
#define RAGE_TIME_LIMIT	5

struct mntlist mountlist;			/* mounted filesystem list */
static int nummounts = 0;
#if DIAGNOSTIC
#define VLISTCHECK(fun, vp, list)	\
	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb)	\
		panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif /* DIAGNOSTIC */

#define VLISTNONE(vp)	\
	do {	\
		(vp)->v_freelist.tqe_next = (struct vnode *)0;	\
		(vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb;	\
	} while(0)

#define VONLIST(vp)	\
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp)	\
	do {	\
		VLISTCHECK((fun), (vp), "free");	\
		TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist);	\
	} while(0)

/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp)	\
	do {	\
		VLISTCHECK((fun), (vp), "dead");	\
		TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist);	\
		vp->v_listflag &= ~VLIST_DEAD;	\
	} while(0)

/* remove a vnode from async work vnode list */
#define VREMASYNC_WORK(fun, vp)	\
	do {	\
		VLISTCHECK((fun), (vp), "async_work");	\
		TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist);	\
		vp->v_listflag &= ~VLIST_ASYNC_WORK;	\
		async_work_vnodes--;	\
	} while(0)

/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp)	\
	do {	\
		if ( !(vp->v_listflag & VLIST_RAGE))	\
			panic("VREMRAGE: vp not on rage list");	\
		VLISTCHECK((fun), (vp), "rage");	\
		TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist);	\
		vp->v_listflag &= ~VLIST_RAGE;	\
	} while(0)
/*
 * vnodetarget hasn't been used in a long time, but
 * it was exported for some reason... I'm leaving it in
 * place for now... it should be deprecated out of the
 * exports and removed eventually.
 */
u_int32_t vnodetarget;		/* target for vnreclaim() */
#define VNODE_FREE_TARGET	20	/* Default value for vnodetarget */

/*
 * We need quite a few vnodes on the free list to sustain the
 * rapid stat() the compilation process does, and still benefit from the name
 * cache. Having too few vnodes on the free list causes serious disk
 * thrashing as we cycle through them.
 */
#define VNODE_FREE_MIN		CONFIG_VNODE_FREE_MIN	/* freelist should have at least this many */

static void async_work_continue(void);
/*
 * Initialize the vnode management data structures.
 */
__private_extern__ void
vntblinit(void)
{
	thread_t	thread = THREAD_NULL;

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&vnode_async_work_list);
	TAILQ_INIT(&mountlist);

	vnodetarget = VNODE_FREE_TARGET;

	microuptime(&rage_tv);
	rage_limit = desiredvnodes / 100;

	if (rage_limit < RAGE_LIMIT_MIN)
		rage_limit = RAGE_LIMIT_MIN;

	/*
	 * Scale the vm_object_cache to accommodate the vnodes
	 */
	(void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);

	/*
	 * create worker threads
	 */
	kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
	thread_deallocate(thread);
}
/* Reset the VM Object Cache with the values passed in */
__private_extern__ kern_return_t
reset_vmobjectcache(unsigned int val1, unsigned int val2)
{
	vm_size_t oval = val1 - VNODE_FREE_MIN;
	vm_size_t nval;

	if (val2 < VNODE_FREE_MIN)
		nval = 0;
	else
		nval = val2 - VNODE_FREE_MIN;

	return (adjust_vm_object_cache(oval, nval));
}
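/*
 * Wait until the number of pending writes on the vnode drops to
 * output_target, throttling writers via VTHROTTLED and waking waiters
 * via VBWAIT.  slptimeout is expressed in 10 msec units; the msleep
 * error, if any, is returned.
 */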
/* the timeout is in 10 msecs */
int
vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
	int error = 0;
	struct timespec ts;

	KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);

	if (vp->v_numoutput > output_target) {

		slpflag |= PDROP;

		vnode_lock_spin(vp);

		while ((vp->v_numoutput > output_target) && error == 0) {
			if (output_target)
				vp->v_flag |= VTHROTTLED;
			else
				vp->v_flag |= VBWAIT;

			ts.tv_sec = (slptimeout/100);
			ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000;

			error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);

			vnode_lock_spin(vp);
		}
		vnode_unlock(vp);
	}
	KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);

	return error;
}
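/*
 * vnode_startwrite()/vnode_writedone() bracket an in-flight write: they
 * atomically adjust v_numoutput and, on completion, wake any thread
 * throttled in vnode_waitforwrites().
 */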
void
vnode_startwrite(vnode_t vp) {

	OSAddAtomic(1, &vp->v_numoutput);
}

void
vnode_writedone(vnode_t vp)
{
	if (vp) {
		int need_wakeup = 0;

		OSAddAtomic(-1, &vp->v_numoutput);

		vnode_lock_spin(vp);

		if (vp->v_numoutput < 0)
			panic("vnode_writedone: numoutput < 0");

		if ((vp->v_flag & VTHROTTLED)) {
			vp->v_flag &= ~VTHROTTLED;
			need_wakeup = 1;
		}
		if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
			vp->v_flag &= ~VBWAIT;
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup((caddr_t)&vp->v_numoutput);
	}
}
int
vnode_hasdirtyblks(vnode_t vp)
{
	struct cl_writebehind *wbp;

	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be a synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_dirtyblkhd.lh_first)
		return (1);

	if (!UBCINFOEXISTS(vp))
		return (0);

	wbp = vp->v_ubcinfo->cl_wbehind;

	if (wbp && (wbp->cl_number || wbp->cl_scmap))
		return (1);

	return (0);
}

int
vnode_hascleanblks(vnode_t vp)
{
	/*
	 * Not taking the buf_mtxp as there is little
	 * point doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be a synchronization, it must be driven
	 * by the caller
	 */
	if (vp->v_cleanblkhd.lh_first)
		return (1);
	return (0);
}
void
vnode_iterate_setup(mount_t mp)
{
	while (mp->mnt_lflag & MNT_LITER) {
		mp->mnt_lflag |= MNT_LITERWAIT;
		msleep((caddr_t)mp, &mp->mnt_mlock, PVFS, "vnode_iterate_setup", NULL);
	}

	mp->mnt_lflag |= MNT_LITER;
}
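/*
 * Scan the mount's vnode list for vnodes that would make an unmount fail
 * (busy use counts, or iocounts that stay elevated for ~3 seconds),
 * honoring the SKIPSYSTEM/SKIPSWAP/WRITECLOSE flags.  Returns non-zero
 * if a busy vnode is found.
 */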
int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;
	int ret = 0;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		if (vp->v_type == VDIR)
			continue;
		if (vp == skipvp)
			continue;
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH)))
			continue;
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
			continue;
		if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;

		/* Look for busy vnode */
		if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
			ret = 1;
			break;

		} else if (vp->v_iocount > 0) {
			/* Busy if iocount is > 0 for more than 3 seconds */
			tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
			if (vp->v_iocount > 0) {
				ret = 1;
				break;
			}
		}
	}

	return (ret);
}
/*
 * This routine prepares iteration by moving all the vnodes to worker queue
 * called with mount lock held
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return (0);
	}

	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	TAILQ_INIT(&mp->mnt_vnodelist);
	if (mp->mnt_newvnodes.tqh_first != NULL)
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	TAILQ_INIT(&mp->mnt_newvnodes);

	return (1);
}
/* called with mount lock held */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode *mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Joining the workerqueue entries to the mount vnode list */
		if (mvp)
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		else
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode *nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first)
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		else
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return (moved);
}
void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
	if (mp->mnt_lflag & MNT_LITERWAIT) {
		mp->mnt_lflag &= ~MNT_LITERWAIT;
		wakeup(mp);
	}
}
#include <i386/panic_hooks.h>

struct vnode_iterate_panic_hook {
	panic_hook_t hook;
	mount_t mp;
	struct vnode *vp;
};

static void vnode_iterate_panic_hook(panic_hook_t *hook_)
{
	extern int kdb_log(const char *fmt, ...);
	struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
	panic_phys_range_t range;
	uint64_t phys;

	if (panic_phys_range_before(hook->mp, &phys, &range)) {
		kdb_log("mp = %p, phys = %p, prev (%p: %p-%p)\n",
			hook->mp, phys, range.type, range.phys_start,
			range.phys_start + range.len);
	} else {
		kdb_log("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
	}

	if (panic_phys_range_before(hook->vp, &phys, &range)) {
		kdb_log("vp = %p, phys = %p, prev (%p: %p-%p)\n",
			hook->mp, phys, range.type, range.phys_start,
			range.phys_start + range.len);
	} else {
		kdb_log("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
	}
	panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
}
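/*
 * Iterate over every vnode attached to the mount, calling 'callout' with an
 * iocount held on each one.  The VNODE_RETURNED / VNODE_CLAIMED family of
 * return codes from the callout controls whether the iocount is dropped
 * here and whether iteration stops early.
 */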
int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
	      void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* if it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		return (ret);
	}

	struct vnode_iterate_panic_hook hook;
	hook.mp = mp;
	hook.vp = NULL;
	panic_hook(&hook.hook, vnode_iterate_panic_hook);

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		vid = vp->v_id;
		mount_unlock(mp);

		if ( vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			continue;
		}
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		  case VNODE_RETURNED:
		  case VNODE_RETURNED_DONE:
			  vnode_put(vp);
			  if (retval == VNODE_RETURNED_DONE) {
				  mount_lock(mp);
				  goto out;
			  }
			  break;

		  case VNODE_CLAIMED_DONE:
			  mount_lock(mp);
			  goto out;
		  case VNODE_CLAIMED:
		  default:
			  break;
		}
		mount_lock(mp);
	}

out:
	panic_unhook(&hook.hook);
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);

	return (ret);
}
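/*
 * Thin wrappers around the per-mount rename lock and mount mutex
 * (mnt_renamelock / mnt_mlock).
 */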
void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}

void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}

void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}
779 mount_ref(mount_t mp
, int locked
)
792 mount_drop(mount_t mp
, int locked
)
799 if (mp
->mnt_count
== 0 && (mp
->mnt_lflag
& MNT_LDRAIN
))
800 wakeup(&mp
->mnt_lflag
);
808 mount_iterref(mount_t mp
, int locked
)
814 if (mp
->mnt_iterref
< 0) {
825 mount_isdrained(mount_t mp
, int locked
)
831 if (mp
->mnt_iterref
< 0)
841 mount_iterdrop(mount_t mp
)
845 wakeup(&mp
->mnt_iterref
);
850 mount_iterdrain(mount_t mp
)
853 while (mp
->mnt_iterref
)
854 msleep((caddr_t
)&mp
->mnt_iterref
, mnt_list_mtx_lock
, PVFS
, "mount_iterdrain", NULL
);
855 /* mount iterations drained */
856 mp
->mnt_iterref
= -1;
860 mount_iterreset(mount_t mp
)
863 if (mp
->mnt_iterref
== -1)
868 /* always called with mount lock held */
870 mount_refdrain(mount_t mp
)
872 if (mp
->mnt_lflag
& MNT_LDRAIN
)
873 panic("already in drain");
874 mp
->mnt_lflag
|= MNT_LDRAIN
;
876 while (mp
->mnt_count
)
877 msleep((caddr_t
)&mp
->mnt_lflag
, &mp
->mnt_mlock
, PVFS
, "mount_drain", NULL
);
879 if (mp
->mnt_vnodelist
.tqh_first
!= NULL
)
880 panic("mount_refdrain: dangling vnode");
882 mp
->mnt_lflag
&= ~MNT_LDRAIN
;
/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp) {
	mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
}
896 * Mark a mount point as busy. Used to synchronize access and to delay
900 vfs_busy(mount_t mp
, int flags
)
904 if (mp
->mnt_lflag
& MNT_LDEAD
)
909 if (mp
->mnt_lflag
& MNT_LUNMOUNT
) {
910 if (flags
& LK_NOWAIT
|| mp
->mnt_lflag
& MNT_LDEAD
) {
916 * Since all busy locks are shared except the exclusive
917 * lock granted when unmounting, the only place that a
918 * wakeup needs to be done is at the release of the
919 * exclusive lock at the end of dounmount.
921 mp
->mnt_lflag
|= MNT_LWAIT
;
922 msleep((caddr_t
)mp
, &mp
->mnt_mlock
, (PVFS
| PDROP
), "vfsbusy", NULL
);
928 lck_rw_lock_shared(&mp
->mnt_rwlock
);
931 * Until we are granted the rwlock, it's possible for the mount point to
932 * change state, so re-evaluate before granting the vfs_busy.
934 if (mp
->mnt_lflag
& (MNT_LDEAD
| MNT_LUNMOUNT
)) {
935 lck_rw_done(&mp
->mnt_rwlock
);
942 * Free a busy filesystem.
945 vfs_unbusy(mount_t mp
)
947 lck_rw_done(&mp
->mnt_rwlock
);
953 vfs_rootmountfailed(mount_t mp
) {
956 mp
->mnt_vtable
->vfc_refcount
--;
961 mount_lock_destroy(mp
);
964 mac_mount_label_destroy(mp
);
967 FREE_ZONE(mp
, sizeof(struct mount
), M_MOUNT
);
971 * Lookup a filesystem type, and if found allocate and initialize
972 * a mount structure for it.
974 * Devname is usually updated by mount(8) after booting.
977 vfs_rootmountalloc_internal(struct vfstable
*vfsp
, const char *devname
)
981 mp
= _MALLOC_ZONE(sizeof(struct mount
), M_MOUNT
, M_WAITOK
);
982 bzero((char *)mp
, sizeof(struct mount
));
984 /* Initialize the default IO constraints */
985 mp
->mnt_maxreadcnt
= mp
->mnt_maxwritecnt
= MAXPHYS
;
986 mp
->mnt_segreadcnt
= mp
->mnt_segwritecnt
= 32;
987 mp
->mnt_maxsegreadsize
= mp
->mnt_maxreadcnt
;
988 mp
->mnt_maxsegwritesize
= mp
->mnt_maxwritecnt
;
989 mp
->mnt_devblocksize
= DEV_BSIZE
;
990 mp
->mnt_alignmentmask
= PAGE_MASK
;
991 mp
->mnt_ioqueue_depth
= MNT_DEFAULT_IOQUEUE_DEPTH
;
994 mp
->mnt_realrootvp
= NULLVP
;
995 mp
->mnt_authcache_ttl
= CACHED_LOOKUP_RIGHT_TTL
;
996 mp
->mnt_throttle_mask
= LOWPRI_MAX_NUM_DEV
- 1;
997 mp
->mnt_devbsdunit
= 0;
1000 (void)vfs_busy(mp
, LK_NOWAIT
);
1002 TAILQ_INIT(&mp
->mnt_vnodelist
);
1003 TAILQ_INIT(&mp
->mnt_workerqueue
);
1004 TAILQ_INIT(&mp
->mnt_newvnodes
);
1006 mp
->mnt_vtable
= vfsp
;
1007 mp
->mnt_op
= vfsp
->vfc_vfsops
;
1008 mp
->mnt_flag
= MNT_RDONLY
| MNT_ROOTFS
;
1009 mp
->mnt_vnodecovered
= NULLVP
;
1010 //mp->mnt_stat.f_type = vfsp->vfc_typenum;
1011 mp
->mnt_flag
|= vfsp
->vfc_flags
& MNT_VISFLAGMASK
;
1014 vfsp
->vfc_refcount
++;
1015 mount_list_unlock();
1017 strlcpy(mp
->mnt_vfsstat
.f_fstypename
, vfsp
->vfc_name
, MFSTYPENAMELEN
);
1018 mp
->mnt_vfsstat
.f_mntonname
[0] = '/';
1019 /* XXX const poisoning layering violation */
1020 (void) copystr((const void *)devname
, mp
->mnt_vfsstat
.f_mntfromname
, MAXPATHLEN
- 1, NULL
);
1023 mac_mount_label_init(mp
);
1024 mac_mount_label_associate(vfs_context_kernel(), mp
);
1030 vfs_rootmountalloc(const char *fstypename
, const char *devname
, mount_t
*mpp
)
1032 struct vfstable
*vfsp
;
1034 for (vfsp
= vfsconf
; vfsp
; vfsp
= vfsp
->vfc_next
)
1035 if (!strncmp(vfsp
->vfc_name
, fstypename
,
1036 sizeof(vfsp
->vfc_name
)))
1041 *mpp
= vfs_rootmountalloc_internal(vfsp
, devname
);
1051 * Find an appropriate filesystem to use for the root. If a filesystem
1052 * has not been preselected, walk through the list of known filesystems
1053 * trying those that have mountroot routines, and try them until one
1054 * works or we have tried them all.
1056 extern int (*mountroot
)(void);
1064 struct vfstable
*vfsp
;
1065 vfs_context_t ctx
= vfs_context_kernel();
1066 struct vfs_attr vfsattr
;
1069 vnode_t bdevvp_rootvp
;
1071 if (mountroot
!= NULL
) {
1073 * used for netboot which follows a different set of rules
1075 error
= (*mountroot
)();
1078 if ((error
= bdevvp(rootdev
, &rootvp
))) {
1079 printf("vfs_mountroot: can't setup bdevvp\n");
1083 * 4951998 - code we call in vfc_mountroot may replace rootvp
* so keep a local copy for some housekeeping.
1086 bdevvp_rootvp
= rootvp
;
1088 for (vfsp
= vfsconf
; vfsp
; vfsp
= vfsp
->vfc_next
) {
1089 if (vfsp
->vfc_mountroot
== NULL
)
1092 mp
= vfs_rootmountalloc_internal(vfsp
, "root_device");
1093 mp
->mnt_devvp
= rootvp
;
1095 if ((error
= (*vfsp
->vfc_mountroot
)(mp
, rootvp
, ctx
)) == 0) {
1096 if ( bdevvp_rootvp
!= rootvp
) {
1099 * bump the iocount and fix up mnt_devvp for the
1100 * new rootvp (it will already have a usecount taken)...
* drop the iocount and the usecount on the original
1102 * since we are no longer going to use it...
1104 vnode_getwithref(rootvp
);
1105 mp
->mnt_devvp
= rootvp
;
1107 vnode_rele(bdevvp_rootvp
);
1108 vnode_put(bdevvp_rootvp
);
1110 mp
->mnt_devvp
->v_specflags
|= SI_MOUNTEDON
;
1117 * cache the IO attributes for the underlying physical media...
1118 * an error return indicates the underlying driver doesn't
1119 * support all the queries necessary... however, reasonable
1120 * defaults will have been set, so no reason to bail or care
1122 vfs_init_io_attributes(rootvp
, mp
);
1125 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
1127 if (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSNATIVEXATTR
) {
1128 mp
->mnt_kern_flag
|= MNTK_EXTENDED_ATTRS
;
1130 if (mp
->mnt_vtable
->vfc_vfsflags
& VFC_VFSPREFLIGHT
) {
1131 mp
->mnt_kern_flag
|= MNTK_UNMOUNT_PREFLIGHT
;
1136 if (MNTK_VIRTUALDEV
& mp
->mnt_kern_flag
) speed
= 128;
1137 else if (MNTK_SSD
& mp
->mnt_kern_flag
) speed
= 7*256;
1139 vc_progress_setdiskspeed(speed
);
1141 * Probe root file system for additional features.
1143 (void)VFS_START(mp
, 0, ctx
);
1145 VFSATTR_INIT(&vfsattr
);
1146 VFSATTR_WANTED(&vfsattr
, f_capabilities
);
1147 if (vfs_getattr(mp
, &vfsattr
, ctx
) == 0 &&
1148 VFSATTR_IS_SUPPORTED(&vfsattr
, f_capabilities
)) {
1149 if ((vfsattr
.f_capabilities
.capabilities
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_EXTENDED_ATTR
) &&
1150 (vfsattr
.f_capabilities
.valid
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_EXTENDED_ATTR
)) {
1151 mp
->mnt_kern_flag
|= MNTK_EXTENDED_ATTRS
;
1154 if ((vfsattr
.f_capabilities
.capabilities
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_NAMEDSTREAMS
) &&
1155 (vfsattr
.f_capabilities
.valid
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_NAMEDSTREAMS
)) {
1156 mp
->mnt_kern_flag
|= MNTK_NAMED_STREAMS
;
1159 if ((vfsattr
.f_capabilities
.capabilities
[VOL_CAPABILITIES_FORMAT
] & VOL_CAP_FMT_PATH_FROM_ID
) &&
1160 (vfsattr
.f_capabilities
.valid
[VOL_CAPABILITIES_FORMAT
] & VOL_CAP_FMT_PATH_FROM_ID
)) {
1161 mp
->mnt_kern_flag
|= MNTK_PATH_FROM_ID
;
1166 * get rid of iocount reference returned
* by bdevvp (or picked up by us on the substituted
1168 * rootvp)... it (or we) will have also taken
1169 * a usecount reference which we want to keep
1174 if ((vfs_flags(mp
) & MNT_MULTILABEL
) == 0)
1177 error
= VFS_ROOT(mp
, &vp
, ctx
);
1179 printf("%s() VFS_ROOT() returned %d\n",
1181 dounmount(mp
, MNT_FORCE
, 0, ctx
);
1184 error
= vnode_label(mp
, NULL
, vp
, NULL
, 0, ctx
);
1186 * get rid of reference provided by VFS_ROOT
1191 printf("%s() vnode_label() returned %d\n",
1193 dounmount(mp
, MNT_FORCE
, 0, ctx
);
1202 vfs_rootmountfailed(mp
);
1204 if (error
!= EINVAL
)
1205 printf("%s_mountroot failed: %d\n", vfsp
->vfc_name
, error
);
1211 * Lookup a mount point by filesystem identifier.
1215 vfs_getvfs(fsid_t
*fsid
)
1217 return (mount_list_lookupby_fsid(fsid
, 0, 0));
1220 static struct mount
*
1221 vfs_getvfs_locked(fsid_t
*fsid
)
1223 return(mount_list_lookupby_fsid(fsid
, 1, 0));
1227 vfs_getvfs_by_mntonname(char *path
)
1229 mount_t retmp
= (mount_t
)0;
1233 TAILQ_FOREACH(mp
, &mountlist
, mnt_list
) {
1234 if (!strncmp(mp
->mnt_vfsstat
.f_mntonname
, path
,
1235 sizeof(mp
->mnt_vfsstat
.f_mntonname
))) {
1237 if (mount_iterref(retmp
, 1))
1243 mount_list_unlock();
1247 /* generation number for creation of new fsids */
1248 u_short mntid_gen
= 0;
1250 * Get a new unique fsid
1253 vfs_getnewfsid(struct mount
*mp
)
1262 /* generate a new fsid */
1263 mtype
= mp
->mnt_vtable
->vfc_typenum
;
1264 if (++mntid_gen
== 0)
1266 tfsid
.val
[0] = makedev(nblkdev
+ mtype
, mntid_gen
);
1267 tfsid
.val
[1] = mtype
;
1269 TAILQ_FOREACH(nmp
, &mountlist
, mnt_list
) {
1270 while (vfs_getvfs_locked(&tfsid
)) {
1271 if (++mntid_gen
== 0)
1273 tfsid
.val
[0] = makedev(nblkdev
+ mtype
, mntid_gen
);
1276 mp
->mnt_vfsstat
.f_fsid
.val
[0] = tfsid
.val
[0];
1277 mp
->mnt_vfsstat
.f_fsid
.val
[1] = tfsid
.val
[1];
1278 mount_list_unlock();
1282 * Routines having to do with the management of the vnode table.
1284 extern int (**dead_vnodeop_p
)(void *);
1285 long numvnodes
, freevnodes
, deadvnodes
, async_work_vnodes
;
1288 int async_work_timed_out
= 0;
1289 int async_work_handled
= 0;
1290 int dead_vnode_wanted
= 0;
1291 int dead_vnode_waited
= 0;
1294 * Move a vnode from one mount queue to another.
1297 insmntque(vnode_t vp
, mount_t mp
)
1301 * Delete from old mount point vnode list, if on one.
1303 if ( (lmp
= vp
->v_mount
) != NULL
&& lmp
!= dead_mountp
) {
1304 if ((vp
->v_lflag
& VNAMED_MOUNT
) == 0)
1305 panic("insmntque: vp not in mount vnode list");
1306 vp
->v_lflag
&= ~VNAMED_MOUNT
;
1308 mount_lock_spin(lmp
);
1312 if (vp
->v_mntvnodes
.tqe_next
== NULL
) {
1313 if (TAILQ_LAST(&lmp
->mnt_vnodelist
, vnodelst
) == vp
)
1314 TAILQ_REMOVE(&lmp
->mnt_vnodelist
, vp
, v_mntvnodes
);
1315 else if (TAILQ_LAST(&lmp
->mnt_newvnodes
, vnodelst
) == vp
)
1316 TAILQ_REMOVE(&lmp
->mnt_newvnodes
, vp
, v_mntvnodes
);
1317 else if (TAILQ_LAST(&lmp
->mnt_workerqueue
, vnodelst
) == vp
)
1318 TAILQ_REMOVE(&lmp
->mnt_workerqueue
, vp
, v_mntvnodes
);
1320 vp
->v_mntvnodes
.tqe_next
->v_mntvnodes
.tqe_prev
= vp
->v_mntvnodes
.tqe_prev
;
1321 *vp
->v_mntvnodes
.tqe_prev
= vp
->v_mntvnodes
.tqe_next
;
1323 vp
->v_mntvnodes
.tqe_next
= NULL
;
1324 vp
->v_mntvnodes
.tqe_prev
= NULL
;
1330 * Insert into list of vnodes for the new mount point, if available.
1332 if ((vp
->v_mount
= mp
) != NULL
) {
1333 mount_lock_spin(mp
);
1334 if ((vp
->v_mntvnodes
.tqe_next
!= 0) && (vp
->v_mntvnodes
.tqe_prev
!= 0))
1335 panic("vp already in mount list");
1336 if (mp
->mnt_lflag
& MNT_LITER
)
1337 TAILQ_INSERT_HEAD(&mp
->mnt_newvnodes
, vp
, v_mntvnodes
);
1339 TAILQ_INSERT_HEAD(&mp
->mnt_vnodelist
, vp
, v_mntvnodes
);
1340 if (vp
->v_lflag
& VNAMED_MOUNT
)
1341 panic("insmntque: vp already in mount vnode list");
1342 vp
->v_lflag
|= VNAMED_MOUNT
;
1350 * Create a vnode for a block device.
1351 * Used for root filesystem, argdev, and swap areas.
1352 * Also used for memory file system special devices.
1355 bdevvp(dev_t dev
, vnode_t
*vpp
)
1359 struct vnode_fsparam vfsp
;
1360 struct vfs_context context
;
1367 context
.vc_thread
= current_thread();
1368 context
.vc_ucred
= FSCRED
;
1370 vfsp
.vnfs_mp
= (struct mount
*)0;
1371 vfsp
.vnfs_vtype
= VBLK
;
1372 vfsp
.vnfs_str
= "bdevvp";
1373 vfsp
.vnfs_dvp
= NULL
;
1374 vfsp
.vnfs_fsnode
= NULL
;
1375 vfsp
.vnfs_cnp
= NULL
;
1376 vfsp
.vnfs_vops
= spec_vnodeop_p
;
1377 vfsp
.vnfs_rdev
= dev
;
1378 vfsp
.vnfs_filesize
= 0;
1380 vfsp
.vnfs_flags
= VNFS_NOCACHE
| VNFS_CANTCACHE
;
1382 vfsp
.vnfs_marksystem
= 0;
1383 vfsp
.vnfs_markroot
= 0;
1385 if ( (error
= vnode_create(VNCREATE_FLAVOR
, VCREATESIZE
, &vfsp
, &nvp
)) ) {
1389 vnode_lock_spin(nvp
);
1390 nvp
->v_flag
|= VBDEVVP
;
1391 nvp
->v_tag
= VT_NON
; /* set this to VT_NON so during aliasing it can be replaced */
1393 if ( (error
= vnode_ref(nvp
)) ) {
1394 panic("bdevvp failed: vnode_ref");
1397 if ( (error
= VNOP_FSYNC(nvp
, MNT_WAIT
, &context
)) ) {
1398 panic("bdevvp failed: fsync");
1401 if ( (error
= buf_invalidateblks(nvp
, BUF_WRITE_DATA
, 0, 0)) ) {
1402 panic("bdevvp failed: invalidateblks");
1408 * XXXMAC: We can't put a MAC check here, the system will
1409 * panic without this vnode.
1413 if ( (error
= VNOP_OPEN(nvp
, FREAD
, &context
)) ) {
1414 panic("bdevvp failed: open");
1424 * Check to see if the new vnode represents a special device
1425 * for which we already have a vnode (either because of
1426 * bdevvp() or because of a different vnode representing
1427 * the same block device). If such an alias exists, deallocate
1428 * the existing contents and return the aliased vnode. The
1429 * caller is responsible for filling it with its new contents.
1432 checkalias(struct vnode
*nvp
, dev_t nvp_rdev
)
1436 struct specinfo
*sin
= NULL
;
1439 vpp
= &speclisth
[SPECHASH(nvp_rdev
)];
1443 for (vp
= *vpp
; vp
; vp
= vp
->v_specnext
) {
1444 if (nvp_rdev
== vp
->v_rdev
&& nvp
->v_type
== vp
->v_type
) {
1453 if (vnode_getwithvid(vp
,vid
)) {
1457 * Termination state is checked in vnode_getwithvid
1462 * Alias, but not in use, so flush it out.
1464 if ((vp
->v_iocount
== 1) && (vp
->v_usecount
== 0)) {
1465 vnode_reclaim_internal(vp
, 1, 1, 0);
1466 vnode_put_locked(vp
);
1472 if (vp
== NULL
|| vp
->v_tag
!= VT_NON
) {
1474 MALLOC_ZONE(sin
, struct specinfo
*, sizeof(struct specinfo
),
1475 M_SPECINFO
, M_WAITOK
);
1478 nvp
->v_specinfo
= sin
;
1479 bzero(nvp
->v_specinfo
, sizeof(struct specinfo
));
1480 nvp
->v_rdev
= nvp_rdev
;
1481 nvp
->v_specflags
= 0;
1482 nvp
->v_speclastr
= -1;
1483 nvp
->v_specinfo
->si_opencount
= 0;
1484 nvp
->v_specinfo
->si_initted
= 0;
1485 nvp
->v_specinfo
->si_throttleable
= 0;
1489 /* We dropped the lock, someone could have added */
1491 for (vp
= *vpp
; vp
; vp
= vp
->v_specnext
) {
1492 if (nvp_rdev
== vp
->v_rdev
&& nvp
->v_type
== vp
->v_type
) {
1500 nvp
->v_hashchain
= vpp
;
1501 nvp
->v_specnext
= *vpp
;
1505 nvp
->v_specflags
|= SI_ALIASED
;
1506 vp
->v_specflags
|= SI_ALIASED
;
1508 vnode_put_locked(vp
);
1518 FREE_ZONE(sin
, sizeof(struct specinfo
), M_SPECINFO
);
1521 if ((vp
->v_flag
& (VBDEVVP
| VDEVFLUSH
)) != 0)
1524 panic("checkalias with VT_NON vp that shouldn't: %p", vp
);
1531 * Get a reference on a particular vnode and lock it if requested.
1532 * If the vnode was on the inactive list, remove it from the list.
1533 * If the vnode was on the free list, remove it from the list and
1534 * move it to inactive list as needed.
1535 * The vnode lock bit is set if the vnode is being eliminated in
1536 * vgone. The process is awakened when the transition is completed,
1537 * and an error returned to indicate that the vnode is no longer
1538 * usable (possibly having been changed to a new file system type).
1541 vget_internal(vnode_t vp
, int vid
, int vflags
)
1545 vnode_lock_spin(vp
);
1547 if ((vflags
& VNODE_WRITEABLE
) && (vp
->v_writecount
== 0))
1549 * vnode to be returned only if it has writers opened
1553 error
= vnode_getiocount(vp
, vid
, vflags
);
1561 * Returns: 0 Success
1562 * ENOENT No such file or directory [terminating]
1565 vnode_ref(vnode_t vp
)
1568 return (vnode_ref_ext(vp
, 0, 0));
1572 * Returns: 0 Success
1573 * ENOENT No such file or directory [terminating]
1576 vnode_ref_ext(vnode_t vp
, int fmode
, int flags
)
1580 vnode_lock_spin(vp
);
1583 * once all the current call sites have been fixed to insure they have
1584 * taken an iocount, we can toughen this assert up and insist that the
1585 * iocount is non-zero... a non-zero usecount doesn't insure correctness
1587 if (vp
->v_iocount
<= 0 && vp
->v_usecount
<= 0)
1588 panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp
, vp
->v_iocount
, vp
->v_usecount
);
1591 * if you are the owner of drain/termination, can acquire usecount
1593 if ((flags
& VNODE_REF_FORCE
) == 0) {
1594 if ((vp
->v_lflag
& (VL_DRAIN
| VL_TERMINATE
| VL_DEAD
))) {
1595 if (vp
->v_owner
!= current_thread()) {
1603 if (fmode
& FWRITE
) {
1604 if (++vp
->v_writecount
<= 0)
1605 panic("vnode_ref_ext: v_writecount");
1607 if (fmode
& O_EVTONLY
) {
1608 if (++vp
->v_kusecount
<= 0)
1609 panic("vnode_ref_ext: v_kusecount");
1611 if (vp
->v_flag
& VRAGE
) {
1614 ut
= get_bsdthread_info(current_thread());
1616 if ( !(current_proc()->p_lflag
& P_LRAGE_VNODES
) &&
1617 !(ut
->uu_flag
& UT_RAGE_VNODES
)) {
1619 * a 'normal' process accessed this vnode
1620 * so make sure its no longer marked
1621 * for rapid aging... also, make sure
1622 * it gets removed from the rage list...
1623 * when v_usecount drops back to 0, it
1624 * will be put back on the real free list
1626 vp
->v_flag
&= ~VRAGE
;
1627 vp
->v_references
= 0;
1628 vnode_list_remove(vp
);
1631 if (vp
->v_usecount
== 1 && vp
->v_type
== VREG
&& !(vp
->v_flag
& VSYSTEM
)) {
1633 if (vp
->v_ubcinfo
) {
1634 vnode_lock_convert(vp
);
1635 memory_object_mark_used(vp
->v_ubcinfo
->ui_control
);
1646 vnode_on_reliable_media(vnode_t vp
)
1648 if ( !(vp
->v_mount
->mnt_kern_flag
& MNTK_VIRTUALDEV
) && (vp
->v_mount
->mnt_flag
& MNT_LOCAL
) )
1654 vnode_async_list_add(vnode_t vp
)
1658 if (VONLIST(vp
) || (vp
->v_lflag
& (VL_TERMINATE
|VL_DEAD
)))
1659 panic("vnode_async_list_add: %p is in wrong state", vp
);
1661 TAILQ_INSERT_HEAD(&vnode_async_work_list
, vp
, v_freelist
);
1662 vp
->v_listflag
|= VLIST_ASYNC_WORK
;
1664 async_work_vnodes
++;
1666 vnode_list_unlock();
1668 wakeup(&vnode_async_work_list
);
1674 * put the vnode on appropriate free list.
1675 * called with vnode LOCKED
1678 vnode_list_add(vnode_t vp
)
1680 boolean_t need_dead_wakeup
= FALSE
;
1683 lck_mtx_assert(&vp
->v_lock
, LCK_MTX_ASSERT_OWNED
);
1686 * if it is already on a list or non zero references return
1688 if (VONLIST(vp
) || (vp
->v_usecount
!= 0) || (vp
->v_iocount
!= 0) || (vp
->v_lflag
& VL_TERMINATE
))
1693 if ((vp
->v_flag
& VRAGE
) && !(vp
->v_lflag
& VL_DEAD
)) {
1695 * add the new guy to the appropriate end of the RAGE list
1697 if ((vp
->v_flag
& VAGE
))
1698 TAILQ_INSERT_HEAD(&vnode_rage_list
, vp
, v_freelist
);
1700 TAILQ_INSERT_TAIL(&vnode_rage_list
, vp
, v_freelist
);
1702 vp
->v_listflag
|= VLIST_RAGE
;
1706 * reset the timestamp for the last inserted vp on the RAGE
1707 * queue to let new_vnode know that its not ok to start stealing
1708 * from this list... as long as we're actively adding to this list
1709 * we'll push out the vnodes we want to donate to the real free list
1710 * once we stop pushing, we'll let some time elapse before we start
1711 * stealing them in the new_vnode routine
1713 microuptime(&rage_tv
);
1716 * if VL_DEAD, insert it at head of the dead list
1717 * else insert at tail of LRU list or at head if VAGE is set
1719 if ( (vp
->v_lflag
& VL_DEAD
)) {
1720 TAILQ_INSERT_HEAD(&vnode_dead_list
, vp
, v_freelist
);
1721 vp
->v_listflag
|= VLIST_DEAD
;
1724 if (dead_vnode_wanted
) {
1725 dead_vnode_wanted
--;
1726 need_dead_wakeup
= TRUE
;
1729 } else if ( (vp
->v_flag
& VAGE
) ) {
1730 TAILQ_INSERT_HEAD(&vnode_free_list
, vp
, v_freelist
);
1731 vp
->v_flag
&= ~VAGE
;
1734 TAILQ_INSERT_TAIL(&vnode_free_list
, vp
, v_freelist
);
1738 vnode_list_unlock();
1740 if (need_dead_wakeup
== TRUE
)
1741 wakeup_one((caddr_t
)&dead_vnode_wanted
);
1746 * remove the vnode from appropriate free list.
1747 * called with vnode LOCKED and
1748 * the list lock held
1751 vnode_list_remove_locked(vnode_t vp
)
1755 * the v_listflag field is
1756 * protected by the vnode_list_lock
1758 if (vp
->v_listflag
& VLIST_RAGE
)
1759 VREMRAGE("vnode_list_remove", vp
);
1760 else if (vp
->v_listflag
& VLIST_DEAD
)
1761 VREMDEAD("vnode_list_remove", vp
);
1762 else if (vp
->v_listflag
& VLIST_ASYNC_WORK
)
1763 VREMASYNC_WORK("vnode_list_remove", vp
);
1765 VREMFREE("vnode_list_remove", vp
);
1771 * remove the vnode from appropriate free list.
1772 * called with vnode LOCKED
1775 vnode_list_remove(vnode_t vp
)
1778 lck_mtx_assert(&vp
->v_lock
, LCK_MTX_ASSERT_OWNED
);
1781 * we want to avoid taking the list lock
1782 * in the case where we're not on the free
1783 * list... this will be true for most
1784 * directories and any currently in use files
1786 * we're guaranteed that we can't go from
1787 * the not-on-list state to the on-list
1788 * state since we hold the vnode lock...
1789 * all calls to vnode_list_add are done
1790 * under the vnode lock... so we can
* check for that condition (the prevalent one)
1792 * without taking the list lock
1797 * however, we're not guaranteed that
1798 * we won't go from the on-list state
1799 * to the not-on-list state until we
1800 * hold the vnode_list_lock... this
1801 * is due to "new_vnode" removing vnodes
* from the free list under the list_lock
1803 * w/o the vnode lock... so we need to
1804 * check again whether we're currently
1807 vnode_list_remove_locked(vp
);
1809 vnode_list_unlock();
1815 vnode_rele(vnode_t vp
)
1817 vnode_rele_internal(vp
, 0, 0, 0);
1822 vnode_rele_ext(vnode_t vp
, int fmode
, int dont_reenter
)
1824 vnode_rele_internal(vp
, fmode
, dont_reenter
, 0);
1829 vnode_rele_internal(vnode_t vp
, int fmode
, int dont_reenter
, int locked
)
1833 vnode_lock_spin(vp
);
1836 lck_mtx_assert(&vp
->v_lock
, LCK_MTX_ASSERT_OWNED
);
1838 if (--vp
->v_usecount
< 0)
1839 panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp
, vp
->v_usecount
, vp
->v_tag
, vp
->v_type
, vp
->v_flag
);
1841 if (fmode
& FWRITE
) {
1842 if (--vp
->v_writecount
< 0)
1843 panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp
, vp
->v_writecount
, vp
->v_tag
, vp
->v_type
, vp
->v_flag
);
1845 if (fmode
& O_EVTONLY
) {
1846 if (--vp
->v_kusecount
< 0)
1847 panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp
, vp
->v_kusecount
, vp
->v_tag
, vp
->v_type
, vp
->v_flag
);
1849 if (vp
->v_kusecount
> vp
->v_usecount
)
1850 panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp
, vp
->v_kusecount
, vp
->v_usecount
, vp
->v_tag
, vp
->v_type
, vp
->v_flag
);
1852 if ((vp
->v_iocount
> 0) || (vp
->v_usecount
> 0)) {
1854 * vnode is still busy... if we're the last
1855 * usecount, mark for a future call to VNOP_INACTIVE
1856 * when the iocount finally drops to 0
1858 if (vp
->v_usecount
== 0) {
1859 vp
->v_lflag
|= VL_NEEDINACTIVE
;
1860 vp
->v_flag
&= ~(VNOCACHE_DATA
| VRAOFF
| VOPENEVT
);
1864 vp
->v_flag
&= ~(VNOCACHE_DATA
| VRAOFF
| VOPENEVT
);
1866 if (ISSET(vp
->v_lflag
, VL_TERMINATE
| VL_DEAD
) || dont_reenter
) {
1868 * vnode is being cleaned, or
1869 * we've requested that we don't reenter
1870 * the filesystem on this release...in
1871 * the latter case, we'll mark the vnode aged
1874 if ( !(vp
->v_lflag
& (VL_TERMINATE
| VL_DEAD
| VL_MARKTERM
)) ) {
1875 vp
->v_lflag
|= VL_NEEDINACTIVE
;
1877 if (vnode_on_reliable_media(vp
) == FALSE
|| vp
->v_flag
& VISDIRTY
) {
1878 vnode_async_list_add(vp
);
1889 * at this point both the iocount and usecount
1891 * pick up an iocount so that we can call
1892 * VNOP_INACTIVE with the vnode lock unheld
1898 vp
->v_lflag
&= ~VL_NEEDINACTIVE
;
1901 VNOP_INACTIVE(vp
, vfs_context_current());
1903 vnode_lock_spin(vp
);
1905 * because we dropped the vnode lock to call VNOP_INACTIVE
1906 * the state of the vnode may have changed... we may have
1907 * picked up an iocount, usecount or the MARKTERM may have
1908 * been set... we need to reevaluate the reference counts
1909 * to determine if we can call vnode_reclaim_internal at
1910 * this point... if the reference counts are up, we'll pick
1911 * up the MARKTERM state when they get subsequently dropped
1913 if ( (vp
->v_iocount
== 1) && (vp
->v_usecount
== 0) &&
1914 ((vp
->v_lflag
& (VL_MARKTERM
| VL_TERMINATE
| VL_DEAD
)) == VL_MARKTERM
)) {
1917 ut
= get_bsdthread_info(current_thread());
1919 if (ut
->uu_defer_reclaims
) {
1920 vp
->v_defer_reclaimlist
= ut
->uu_vreclaims
;
1921 ut
->uu_vreclaims
= vp
;
1924 vnode_lock_convert(vp
);
1925 vnode_reclaim_internal(vp
, 1, 1, 0);
1927 vnode_dropiocount(vp
);
1930 if (vp
->v_usecount
== 0 && vp
->v_type
== VREG
&& !(vp
->v_flag
& VSYSTEM
)) {
1932 if (vp
->v_ubcinfo
) {
1933 vnode_lock_convert(vp
);
1934 memory_object_mark_unused(vp
->v_ubcinfo
->ui_control
, (vp
->v_flag
& VRAGE
) == VRAGE
);
1943 * Remove any vnodes in the vnode table belonging to mount point mp.
1945 * If MNT_NOFORCE is specified, there should not be any active ones,
1946 * return error if any are found (nb: this is a user error, not a
1947 * system error). If MNT_FORCE is specified, detach any active vnodes
1951 int busyprt
= 0; /* print out busy vnodes */
1955 vflush(struct mount
*mp
, struct vnode
*skipvp
, int flags
)
1964 vnode_iterate_setup(mp
);
1966 * On regular unmounts(not forced) do a
1967 * quick check for vnodes to be in use. This
1968 * preserves the caching of vnodes. automounter
1969 * tries unmounting every so often to see whether
1970 * it is still busy or not.
1972 if (((flags
& FORCECLOSE
)==0) && ((mp
->mnt_kern_flag
& MNTK_UNMOUNT_PREFLIGHT
) != 0)) {
1973 if (vnode_umount_preflight(mp
, skipvp
, flags
)) {
1974 vnode_iterate_clear(mp
);
/* if it returns 0 then there is nothing to do */
1981 retval
= vnode_iterate_prepare(mp
);
1984 vnode_iterate_clear(mp
);
1989 /* iterate over all the vnodes */
1990 while (!TAILQ_EMPTY(&mp
->mnt_workerqueue
)) {
1992 vp
= TAILQ_FIRST(&mp
->mnt_workerqueue
);
1993 TAILQ_REMOVE(&mp
->mnt_workerqueue
, vp
, v_mntvnodes
);
1994 TAILQ_INSERT_TAIL(&mp
->mnt_vnodelist
, vp
, v_mntvnodes
);
1996 if ( (vp
->v_mount
!= mp
) || (vp
== skipvp
)) {
2002 vnode_lock_spin(vp
);
2004 if ((vp
->v_id
!= vid
) || ((vp
->v_lflag
& (VL_DEAD
| VL_TERMINATE
)))) {
2011 * If requested, skip over vnodes marked VSYSTEM.
2012 * Skip over all vnodes marked VNOFLUSH.
2014 if ((flags
& SKIPSYSTEM
) && ((vp
->v_flag
& VSYSTEM
) ||
2015 (vp
->v_flag
& VNOFLUSH
))) {
2021 * If requested, skip over vnodes marked VSWAP.
2023 if ((flags
& SKIPSWAP
) && (vp
->v_flag
& VSWAP
)) {
2029 * If requested, skip over vnodes marked VROOT.
2031 if ((flags
& SKIPROOT
) && (vp
->v_flag
& VROOT
)) {
2037 * If WRITECLOSE is set, only flush out regular file
2038 * vnodes open for writing.
2040 if ((flags
& WRITECLOSE
) &&
2041 (vp
->v_writecount
== 0 || vp
->v_type
!= VREG
)) {
2047 * If the real usecount is 0, all we need to do is clear
2048 * out the vnode data structures and we are done.
2050 if (((vp
->v_usecount
== 0) ||
2051 ((vp
->v_usecount
- vp
->v_kusecount
) == 0))) {
2053 vnode_lock_convert(vp
);
2054 vp
->v_iocount
++; /* so that drain waits for * other iocounts */
2058 vnode_reclaim_internal(vp
, 1, 1, 0);
2059 vnode_dropiocount(vp
);
2068 * If FORCECLOSE is set, forcibly close the vnode.
2069 * For block or character devices, revert to an
2070 * anonymous device. For all other files, just kill them.
2072 if (flags
& FORCECLOSE
) {
2073 vnode_lock_convert(vp
);
2075 if (vp
->v_type
!= VBLK
&& vp
->v_type
!= VCHR
) {
2076 vp
->v_iocount
++; /* so that drain waits * for other iocounts */
2080 vnode_abort_advlocks(vp
);
2081 vnode_reclaim_internal(vp
, 1, 1, 0);
2082 vnode_dropiocount(vp
);
2087 vp
->v_lflag
&= ~VL_DEAD
;
2088 vp
->v_op
= spec_vnodeop_p
;
2089 vp
->v_flag
|= VDEVFLUSH
;
2097 vprint("vflush: busy vnode", vp
);
2104 /* At this point the worker queue is completed */
2105 if (busy
&& ((flags
& FORCECLOSE
)==0) && reclaimed
) {
2108 (void)vnode_iterate_reloadq(mp
);
2109 /* returned with mount lock held */
2113 /* if new vnodes were created in between retry the reclaim */
2114 if ( vnode_iterate_reloadq(mp
) != 0) {
2115 if (!(busy
&& ((flags
& FORCECLOSE
)==0)))
2118 vnode_iterate_clear(mp
);
2121 if (busy
&& ((flags
& FORCECLOSE
)==0))
2126 long num_recycledvnodes
= 0;
2128 * Disassociate the underlying file system from a vnode.
2129 * The vnode lock is held on entry.
2132 vclean(vnode_t vp
, int flags
)
2134 vfs_context_t ctx
= vfs_context_current();
2137 int already_terminating
;
2144 * Check to see if the vnode is in use.
2145 * If so we have to reference it before we clean it out
2146 * so that its count cannot fall to zero and generate a
2147 * race against ourselves to recycle it.
2149 active
= vp
->v_usecount
;
2152 * just in case we missed sending a needed
2153 * VNOP_INACTIVE, we'll do it now
2155 need_inactive
= (vp
->v_lflag
& VL_NEEDINACTIVE
);
2157 vp
->v_lflag
&= ~VL_NEEDINACTIVE
;
2160 * Prevent the vnode from being recycled or
2161 * brought into use while we clean it out.
2163 already_terminating
= (vp
->v_lflag
& VL_TERMINATE
);
2165 vp
->v_lflag
|= VL_TERMINATE
;
2168 * remove the vnode from any mount list
2171 insmntque(vp
, (struct mount
*)0);
2174 is_namedstream
= vnode_isnamedstream(vp
);
2179 OSAddAtomicLong(1, &num_recycledvnodes
);
2181 if (flags
& DOCLOSE
)
2182 clflags
|= IO_NDELAY
;
2183 if (flags
& REVOKEALL
)
2184 clflags
|= IO_REVOKE
;
2186 if (active
&& (flags
& DOCLOSE
))
2187 VNOP_CLOSE(vp
, clflags
, ctx
);
2190 * Clean out any buffers associated with the vnode.
2192 if (flags
& DOCLOSE
) {
2194 if (vp
->v_tag
== VT_NFS
)
2195 nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 0);
2199 VNOP_FSYNC(vp
, MNT_WAIT
, ctx
);
2200 buf_invalidateblks(vp
, BUF_WRITE_DATA
| BUF_INVALIDATE_LOCKED
, 0, 0);
2202 if (UBCINFOEXISTS(vp
))
2204 * Clean the pages in VM.
2206 (void)ubc_msync(vp
, (off_t
)0, ubc_getsize(vp
), NULL
, UBC_PUSHALL
| UBC_INVALIDATE
| UBC_SYNC
);
2208 if (active
|| need_inactive
)
2209 VNOP_INACTIVE(vp
, ctx
);
2212 if ((is_namedstream
!= 0) && (vp
->v_parent
!= NULLVP
)) {
2213 vnode_t pvp
= vp
->v_parent
;
2215 /* Delete the shadow stream file before we reclaim its vnode */
2216 if (vnode_isshadow(vp
)) {
2217 vnode_relenamedstream(pvp
, vp
);
2221 * No more streams associated with the parent. We
2222 * have a ref on it, so its identity is stable.
2223 * If the parent is on an opaque volume, then we need to know
2224 * whether it has associated named streams.
2226 if (vfs_authopaque(pvp
->v_mount
)) {
2227 vnode_lock_spin(pvp
);
2228 pvp
->v_lflag
&= ~VL_HASSTREAMS
;
2235 * Destroy ubc named reference
2236 * cluster_release is done on this path
2237 * along with dropping the reference on the ucred
2238 * (and in the case of forced unmount of an mmap-ed file,
2239 * the ubc reference on the vnode is dropped here too).
2241 ubc_destroy_named(vp
);
2245 * cleanup trigger info from vnode (if any)
2248 vnode_resolver_detach(vp
);
2252 * Reclaim the vnode.
2254 if (VNOP_RECLAIM(vp
, ctx
))
2255 panic("vclean: cannot reclaim");
2257 // make sure the name & parent ptrs get cleaned out!
2258 vnode_update_identity(vp
, NULLVP
, NULL
, 0, 0, VNODE_UPDATE_PARENT
| VNODE_UPDATE_NAME
| VNODE_UPDATE_PURGE
);
2262 vp
->v_mount
= dead_mountp
;
2263 vp
->v_op
= dead_vnodeop_p
;
2267 vp
->v_lflag
|= VL_DEAD
;
2268 vp
->v_flag
&= ~VISDIRTY
;
2270 if (already_terminating
== 0) {
2271 vp
->v_lflag
&= ~VL_TERMINATE
;
2273 * Done with purge, notify sleepers of the grim news.
2275 if (vp
->v_lflag
& VL_TERMWANT
) {
2276 vp
->v_lflag
&= ~VL_TERMWANT
;
2277 wakeup(&vp
->v_lflag
);
2283 * Eliminate all activity associated with the requested vnode
2284 * and with all vnodes aliased to the requested vnode.
2288 vn_revoke(vnode_t vp
, int flags
, __unused vfs_context_t a_context
)
2290 vn_revoke(vnode_t vp
, __unused
int flags
, __unused vfs_context_t a_context
)
2297 if ((flags
& REVOKEALL
) == 0)
2298 panic("vnop_revoke");
2301 if (vnode_isaliased(vp
)) {
2303 * If a vgone (or vclean) is already in progress,
2304 * return an immediate error
2306 if (vp
->v_lflag
& VL_TERMINATE
)
2310 * Ensure that vp will not be vgone'd while we
2311 * are eliminating its aliases.
2314 while ((vp
->v_specflags
& SI_ALIASED
)) {
2315 for (vq
= *vp
->v_hashchain
; vq
; vq
= vq
->v_specnext
) {
2316 if (vq
->v_rdev
!= vp
->v_rdev
||
2317 vq
->v_type
!= vp
->v_type
|| vp
== vq
)
2321 if (vnode_getwithvid(vq
,vid
)){
2326 if (!(vq
->v_lflag
& VL_TERMINATE
)) {
2327 vnode_reclaim_internal(vq
, 1, 1, 0);
2329 vnode_put_locked(vq
);
2338 if (vp
->v_lflag
& VL_TERMINATE
) {
2342 vnode_reclaim_internal(vp
, 1, 0, REVOKEALL
);
2349 * Recycle an unused vnode to the front of the free list.
2350 * Release the passed interlock if the vnode will be recycled.
2353 vnode_recycle(struct vnode
*vp
)
2355 vnode_lock_spin(vp
);
2357 if (vp
->v_iocount
|| vp
->v_usecount
) {
2358 vp
->v_lflag
|= VL_MARKTERM
;
2362 vnode_lock_convert(vp
);
2363 vnode_reclaim_internal(vp
, 1, 0, 0);
2371 vnode_reload(vnode_t vp
)
2373 vnode_lock_spin(vp
);
2375 if ((vp
->v_iocount
> 1) || vp
->v_usecount
) {
2379 if (vp
->v_iocount
<= 0)
2380 panic("vnode_reload with no iocount %d", vp
->v_iocount
);
/* mark for release when iocount is dropped */
2383 vp
->v_lflag
|= VL_MARKTERM
;
2391 vgone(vnode_t vp
, int flags
)
2397 * Clean out the filesystem specific data.
2398 * vclean also takes care of removing the
2399 * vnode from any mount list it might be on
2401 vclean(vp
, flags
| DOCLOSE
);
2404 * If special device, remove it from special device alias list
2407 if ((vp
->v_type
== VBLK
|| vp
->v_type
== VCHR
) && vp
->v_specinfo
!= 0) {
2409 if (*vp
->v_hashchain
== vp
) {
2410 *vp
->v_hashchain
= vp
->v_specnext
;
2412 for (vq
= *vp
->v_hashchain
; vq
; vq
= vq
->v_specnext
) {
2413 if (vq
->v_specnext
!= vp
)
2415 vq
->v_specnext
= vp
->v_specnext
;
2419 panic("missing bdev");
2421 if (vp
->v_specflags
& SI_ALIASED
) {
2423 for (vq
= *vp
->v_hashchain
; vq
; vq
= vq
->v_specnext
) {
2424 if (vq
->v_rdev
!= vp
->v_rdev
||
2425 vq
->v_type
!= vp
->v_type
)
2432 panic("missing alias");
2434 vx
->v_specflags
&= ~SI_ALIASED
;
2435 vp
->v_specflags
&= ~SI_ALIASED
;
2439 struct specinfo
*tmp
= vp
->v_specinfo
;
2440 vp
->v_specinfo
= NULL
;
2441 FREE_ZONE((void *)tmp
, sizeof(struct specinfo
), M_SPECINFO
);
2447 * Lookup a vnode by device number.
2450 check_mountedon(dev_t dev
, enum vtype type
, int *errorp
)
2458 for (vp
= speclisth
[SPECHASH(dev
)]; vp
; vp
= vp
->v_specnext
) {
2459 if (dev
!= vp
->v_rdev
|| type
!= vp
->v_type
)
2463 if (vnode_getwithvid(vp
,vid
))
2465 vnode_lock_spin(vp
);
2466 if ((vp
->v_usecount
> 0) || (vp
->v_iocount
> 1)) {
2468 if ((*errorp
= vfs_mountedon(vp
)) != 0)
2480 * Calculate the total number of references to a special device.
2490 if (!vnode_isaliased(vp
))
2491 return (vp
->v_specinfo
->si_opencount
);
2496 * Grab first vnode and its vid.
2498 vq
= *vp
->v_hashchain
;
2499 vid
= vq
? vq
->v_id
: 0;
2505 * Attempt to get the vnode outside the SPECHASH lock.
2507 if (vnode_getwithvid(vq
, vid
)) {
2512 if (vq
->v_rdev
== vp
->v_rdev
&& vq
->v_type
== vp
->v_type
) {
2513 if ((vq
->v_usecount
== 0) && (vq
->v_iocount
== 1) && vq
!= vp
) {
2515 * Alias, but not in use, so flush it out.
2517 vnode_reclaim_internal(vq
, 1, 1, 0);
2518 vnode_put_locked(vq
);
2522 count
+= vq
->v_specinfo
->si_opencount
;
2528 * must do this with the reference still held on 'vq'
2529 * so that it can't be destroyed while we're poking
2530 * through v_specnext
2532 vnext
= vq
->v_specnext
;
2533 vid
= vnext
? vnext
->v_id
: 0;
2545 int prtactive
= 0; /* 1 => print out reclaim of active vnodes */
2548 * Print out a description of a vnode.
2550 static const char *typename
[] =
2551 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
2554 vprint(const char *label
, struct vnode
*vp
)
2559 printf("%s: ", label
);
2560 printf("type %s, usecount %d, writecount %d",
2561 typename
[vp
->v_type
], vp
->v_usecount
, vp
->v_writecount
);
2563 if (vp
->v_flag
& VROOT
)
2564 strlcat(sbuf
, "|VROOT", sizeof(sbuf
));
2565 if (vp
->v_flag
& VTEXT
)
2566 strlcat(sbuf
, "|VTEXT", sizeof(sbuf
));
2567 if (vp
->v_flag
& VSYSTEM
)
2568 strlcat(sbuf
, "|VSYSTEM", sizeof(sbuf
));
2569 if (vp
->v_flag
& VNOFLUSH
)
2570 strlcat(sbuf
, "|VNOFLUSH", sizeof(sbuf
));
2571 if (vp
->v_flag
& VBWAIT
)
2572 strlcat(sbuf
, "|VBWAIT", sizeof(sbuf
));
2573 if (vnode_isaliased(vp
))
2574 strlcat(sbuf
, "|VALIASED", sizeof(sbuf
));
2575 if (sbuf
[0] != '\0')
2576 printf(" flags (%s)", &sbuf
[1]);
2581 vn_getpath(struct vnode
*vp
, char *pathbuf
, int *len
)
2583 return build_path(vp
, pathbuf
, *len
, len
, BUILDPATH_NO_FS_ENTER
, vfs_context_current());
2587 vn_getpath_fsenter(struct vnode
*vp
, char *pathbuf
, int *len
)
2589 return build_path(vp
, pathbuf
, *len
, len
, 0, vfs_context_current());
int
vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
{
	return ubc_cs_getcdhash(vp, offset, cdhash);
}


static char *extension_table = NULL;
static int   nexts;
static int   max_ext_width;

static int
extension_cmp(const void *a, const void *b)
{
	return (strlen((const char *)a) - strlen((const char *)b));
}


//
// This is the api LaunchServices uses to inform the kernel
// the list of package extensions to ignore.
//
// Internally we keep the list sorted by the length of the
// extension (from longest to shortest).  We sort the list
// of extensions so that we can speed up our searches when
// comparing file names -- we only compare extensions that
// could possibly fit into the file name, not all of them
// (i.e. a short 8 character name can't have an 8
// character extension).
//
extern lck_mtx_t *pkg_extensions_lck;

__private_extern__ int
set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
{
	char *new_exts, *old_exts;
	int error;

	if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
		return EINVAL;
	}

	// allocate one byte extra so we can guarantee null termination
	MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK);
	if (new_exts == NULL) {
		return ENOMEM;
	}

	error = copyin(data, new_exts, nentries * maxwidth);
	if (error) {
		FREE(new_exts, M_TEMP);
		return error;
	}

	new_exts[(nentries * maxwidth)] = '\0';   // guarantee null termination of the block

	qsort(new_exts, nentries, maxwidth, extension_cmp);

	lck_mtx_lock(pkg_extensions_lck);

	old_exts        = extension_table;
	extension_table = new_exts;
	nexts           = nentries;
	max_ext_width   = maxwidth;

	lck_mtx_unlock(pkg_extensions_lck);

	if (old_exts)
		FREE(old_exts, M_TEMP);

	return 0;
}
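
/*
 * Illustrative sketch (not in the original source): with nentries = 3 and
 * maxwidth = 8, the table handed in from user space is a packed array of
 * fixed-width, NUL-padded slots, e.g.
 *
 *	"app\0\0\0\0\0" "bundle\0\0" "kext\0\0\0\0"
 *
 * qsort() above reorders whole maxwidth-sized slots by strlen(), so
 * is_package_name() can stop comparing as soon as an extension can no
 * longer fit in the remaining file-name length.
 */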
__private_extern__ int
is_package_name(const char *name, int len)
{
	int i, extlen;
	const char *ptr, *name_ext;

	if (len <= 3) {
		return 0;
	}

	name_ext = NULL;
	for(ptr = name; *ptr != '\0'; ptr++) {
		if (*ptr == '.') {
			name_ext = ptr;
		}
	}

	// if there is no "." extension, it can't match
	if (name_ext == NULL) {
		return 0;
	}

	// advance over the "."
	name_ext++;

	lck_mtx_lock(pkg_extensions_lck);

	// now iterate over all the extensions to see if any match
	ptr = &extension_table[0];
	for(i = 0; i < nexts; i++, ptr += max_ext_width) {
		extlen = strlen(ptr);
		if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
			// a match!
			lck_mtx_unlock(pkg_extensions_lck);
			return 1;
		}
	}

	lck_mtx_unlock(pkg_extensions_lck);

	// if we get here, no extension matched
	return 0;
}
int
vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
{
	char *ptr, *end;
	int comp = 0;

	*component = -1;
	if (*path != '/') {
		return EINVAL;
	}

	end = path + 1;
	while(end < path + pathlen && *end != '\0') {
		while(end < path + pathlen && *end == '/' && *end != '\0') {
			end++;
		}

		ptr = end;

		while(end < path + pathlen && *end != '/' && *end != '\0') {
			end++;
		}

		if (end > path + pathlen) {
			// hmm, string wasn't null terminated
			return EINVAL;
		}

		*end = '\0';
		if (is_package_name(ptr, end - ptr)) {
			*component = comp;
			break;
		}

		end++;
		comp++;
	}

	return 0;
}
/*
 * Determine if a name is inappropriate for a searchfs query.
 * This list consists of /System currently.
 */

int vn_searchfs_inappropriate_name(const char *name, int len) {
	const char *bad_names[] = { "System" };
	int bad_len[] = { 6 };
	int i;

	for(i = 0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
		if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
			return 1;
		}
	}

	// if we get here, no name matched
	return 0;
}
/*
 * Top level filesystem related information gathering.
 */
extern unsigned int vfs_nummntops;

/*
 * The VFS_NUMMNTOPS shouldn't be at name[1] since it
 * is a VFS generic variable.  Since we no longer support
 * VT_UFS, we reserve its value to support this sysctl node.
 *
 * It should have been:
 *    name[0]:  VFS_GENERIC
 *    name[1]:  VFS_NUMMNTOPS
 */
SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
	   CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	   &vfs_nummntops, 0, "");

int
vfs_sysctl(int *name __unused, u_int namelen __unused,
	   user_addr_t oldp __unused, size_t *oldlenp __unused,
	   user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
	   user_addr_t oldp __unused, size_t *oldlenp __unused,
	   user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
{
	return (EINVAL);
}


//
// The following code disallows specific sysctl's that came through
// the direct sysctl interface (vfs_sysctl_node) instead of the newer
// sysctl_vfs_ctlbyfsid() interface.  We can not allow these selectors
// through vfs_sysctl_node() because it passes the user's oldp pointer
// directly to the file system which (for these selectors) casts it
// back to a struct sysctl_req and then proceeds to use SYSCTL_IN()
// which jumps through an arbitrary function pointer.  When called
// through the sysctl_vfs_ctlbyfsid() interface this does not happen
// and so it's safe.
//
// Unfortunately we have to pull in definitions from AFP and SMB and
// perform explicit name checks on the file system to determine if
// these selectors are being used.
//

#define AFPFS_VFS_CTL_GETID            0x00020001
#define AFPFS_VFS_CTL_NETCHANGE        0x00020002
#define AFPFS_VFS_CTL_VOLCHANGE        0x00020003

#define SMBFS_SYSCTL_REMOUNT           1
#define SMBFS_SYSCTL_REMOUNT_INFO      2
#define SMBFS_SYSCTL_GET_SERVER_SHARE  3
static int
is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
{
	switch(selector_name) {
		case VFS_CTL_NOLOCKS:
		case VFS_CTL_NSTATUS:
		case VFS_CTL_SERVERINFO:
			return 1;

		default:
			break;
	}

	// the more complicated check for some of SMB's special values
	if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
		switch(selector_name) {
			case SMBFS_SYSCTL_REMOUNT:
			case SMBFS_SYSCTL_REMOUNT_INFO:
			case SMBFS_SYSCTL_GET_SERVER_SHARE:
				return 1;
		}
	} else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
		switch(selector_name) {
			case AFPFS_VFS_CTL_GETID:
			case AFPFS_VFS_CTL_NETCHANGE:
			case AFPFS_VFS_CTL_VOLCHANGE:
				return 1;
		}
	}

	//
	// If we get here we passed all the checks so the selector is ok
	//
	return 0;
}
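
/*
 * Illustrative sketch (not from the original source) of the pattern the
 * comment above warns about: a file system's vfs_sysctl handler for one
 * of these selectors does something roughly like
 *
 *	struct sysctl_req *req = (struct sysctl_req *)oldp;
 *	error = SYSCTL_IN(req, &args, sizeof(args));
 *
 * and SYSCTL_IN()/SYSCTL_OUT() jump through the function pointers stored
 * in the request.  If oldp is a raw user pointer (as it is when coming
 * through vfs_sysctl_node()), that function pointer is user-controlled,
 * which is why the selectors are rejected on this path and must use the
 * sysctl-by-fsid interface instead.
 */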
int vfs_sysctl_node SYSCTL_HANDLER_ARGS
{
	int *name, namelen;
	struct vfstable *vfsp;
	int error;
	int fstypenum;

	fstypenum = oidp->oid_number;
	name = arg1;
	namelen = arg2;

	/* all sysctl names at this level should have at least one name slot for the FS */
	if (namelen < 1)
		return (EISDIR); /* overloaded */

	mount_list_lock();
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum == fstypenum) {
			vfsp->vfc_refcount++;
			break;
		}
	mount_list_unlock();

	if (vfsp == NULL)
		return (ENOTSUP);

	if (is_bad_sysctl_name(vfsp, name[0])) {
		printf("vfs: bad selector 0x%.8x for old-style sysctl().  use the sysctl-by-fsid interface instead\n", name[0]);
		return EPERM;
	}

	error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen, req->oldptr, &req->oldlen,
	    req->newptr, req->newlen, vfs_context_current());

	mount_list_lock();
	vfsp->vfc_refcount--;
	mount_list_unlock();

	return error;
}
/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	SPECHASH_LOCK();
	if (vp->v_specflags & SI_MOUNTEDON) {
		error = EBUSY;
		goto out;
	}
	if (vp->v_specflags & SI_ALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON) {
				error = EBUSY;
				break;
			}
		}
	}
out:
	SPECHASH_UNLOCK();
	return (error);
}
struct unmount_info {
	int	u_errs;	// Total failed unmounts
	int	u_busy;	// EBUSY failed unmounts
};

static int
unmount_callback(mount_t mp, void *arg)
{
	int error;
	char *mntname;
	struct unmount_info *uip = arg;

	mount_ref(mp, 0);
	mount_iterdrop(mp);	// avoid vfs_iterate deadlock in dounmount()

	MALLOC_ZONE(mntname, void *, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (mntname)
		strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);

	error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
	if (error) {
		uip->u_errs++;
		printf("Unmount of %s failed (%d)\n", mntname ? mntname : "?", error);
		if (error == EBUSY)
			uip->u_busy++;
	}
	if (mntname)
		FREE_ZONE(mntname, MAXPATHLEN, M_NAMEI);

	return (VFS_RETURNED);
}
/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 * Busy mounts are retried.
 */
__private_extern__ void
vfs_unmountall(void)
{
	int mounts, sec = 1;
	struct unmount_info ui;

retry:
	ui.u_errs = ui.u_busy = 0;
	vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
	mounts = mount_getvfscnt();
	if (mounts == 0)
		return;

	if (ui.u_busy > 0) {		// Busy mounts - wait & retry
		tsleep(&nummounts, PVFS, "busy mount", sec * hz);
		sec *= 2;
		if (sec > 32) {
			printf("Unmounting timed out\n");
			return;
		}
		goto retry;
	} else if (ui.u_errs < mounts) {
		// If the vfs_iterate missed mounts in progress - wait a bit
		tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
	}
}
/*
 * This routine is called from vnode_pager_deallocate out of the VM
 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
 * on a vnode that has a UBCINFO
 */
__private_extern__ void
vnode_pager_vrele(vnode_t vp)
{
	struct ubc_info *uip;

	vnode_lock_spin(vp);

	vp->v_lflag &= ~VNAMED_UBC;
	if (vp->v_usecount != 0) {
		/*
		 * At the eleventh hour, just before the ubcinfo is
		 * destroyed, ensure the ubc-specific v_usecount
		 * reference has gone.  We use v_usecount != 0 as a hint;
		 * ubc_unmap() does nothing if there's no mapping.
		 *
		 * This case is caused by coming here via forced unmount,
		 * versus the usual vm_object_deallocate() path.
		 * In the forced unmount case, ubc_destroy_named()
		 * releases the pager before memory_object_last_unmap()
		 * can be called.
		 */
		vnode_unlock(vp);
		ubc_unmap(vp);
		vnode_lock_spin(vp);
	}

	uip = vp->v_ubcinfo;
	vp->v_ubcinfo = UBC_INFO_NULL;

	vnode_unlock(vp);

	ubc_info_deallocate(uip);
}
#include <sys/disk.h>

u_int32_t rootunit = (u_int32_t)-1;

#if CONFIG_IOSCHED
extern int lowpri_throttle_enabled;
extern int iosched_enabled;
#endif

static int
vfs_init_io_attributes(vnode_t devvp, mount_t mp)
{
	int error;
	off_t readblockcnt = 0;
	off_t writeblockcnt = 0;
	off_t readmaxcnt = 0;
	off_t writemaxcnt = 0;
	off_t readsegcnt = 0;
	off_t writesegcnt = 0;
	off_t readsegsize = 0;
	off_t writesegsize = 0;
	off_t alignment = 0;
	u_int32_t ioqueue_depth = 0;
	u_int32_t blksize;
	u_int64_t temp;
	u_int32_t features;
	int isvirtual = 0;
	int isssd = 0;
	vfs_context_t ctx = vfs_context_current();

	VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
	/*
	 * as a reasonable approximation, only use the lowest bit of the mask
	 * to generate a disk unit number
	 */
	mp->mnt_devbsdunit = num_trailing_0(mp->mnt_throttle_mask);

	if (devvp == rootvp)
		rootunit = mp->mnt_devbsdunit;

	if (mp->mnt_devbsdunit == rootunit) {
		/*
		 * this mount point exists on the same device as the root
		 * partition, so it comes under the hard throttle control...
		 * this is true even for the root mount point itself
		 */
		mp->mnt_kern_flag |= MNTK_ROOTDEV;
	}
	/*
	 * force the spec device to re-cache
	 * the underlying block size in case
	 * the filesystem overrode the initial value
	 */
	set_fsblocksize(devvp);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
				(caddr_t)&blksize, 0, ctx)))
		return (error);

	mp->mnt_devblocksize = blksize;

	/*
	 * set the maximum possible I/O size
	 * this may get clipped to a smaller value
	 * based on which constraints are being advertised
	 * and if those advertised constraints result in a smaller
	 * limit for a given I/O
	 */
	mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
	mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;

	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
		if (isvirtual)
			mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
	}
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
		if (isssd)
			mp->mnt_kern_flag |= MNTK_SSD;
	}
	if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
				(caddr_t)&features, 0, ctx)))
		return (error);
	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
				(caddr_t)&readblockcnt, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
				(caddr_t)&writeblockcnt, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
				(caddr_t)&readmaxcnt, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
				(caddr_t)&writemaxcnt, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
				(caddr_t)&readsegcnt, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
				(caddr_t)&writesegcnt, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
				(caddr_t)&readsegsize, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
				(caddr_t)&writesegsize, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
				(caddr_t)&alignment, 0, ctx)))
		return (error);

	if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
				(caddr_t)&ioqueue_depth, 0, ctx)))
		return (error);
	if (readmaxcnt)
		mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt;

	if (readblockcnt) {
		temp = readblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxreadcnt)
			mp->mnt_maxreadcnt = (u_int32_t)temp;
	}

	if (writemaxcnt)
		mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt;

	if (writeblockcnt) {
		temp = writeblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxwritecnt)
			mp->mnt_maxwritecnt = (u_int32_t)temp;
	}

	if (readsegcnt) {
		temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
	} else {
		temp = mp->mnt_maxreadcnt / PAGE_SIZE;

		if (temp > UINT16_MAX)
			temp = UINT16_MAX;
	}
	mp->mnt_segreadcnt = (u_int16_t)temp;

	if (writesegcnt) {
		temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
	} else {
		temp = mp->mnt_maxwritecnt / PAGE_SIZE;

		if (temp > UINT16_MAX)
			temp = UINT16_MAX;
	}
	mp->mnt_segwritecnt = (u_int16_t)temp;

	if (readsegsize)
		temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
	else
		temp = mp->mnt_maxreadcnt;
	mp->mnt_maxsegreadsize = (u_int32_t)temp;

	if (writesegsize)
		temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
	else
		temp = mp->mnt_maxwritecnt;
	mp->mnt_maxsegwritesize = (u_int32_t)temp;

	if (alignment)
		temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
	else
		temp = 0;
	mp->mnt_alignmentmask = temp;

	if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH)
		temp = ioqueue_depth;
	else
		temp = MNT_DEFAULT_IOQUEUE_DEPTH;

	mp->mnt_ioqueue_depth = temp;
	mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH;

	if (mp->mnt_ioscale > 1)
		printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
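
	/*
	 * Illustrative worked example (not in the original source): with
	 * MNT_DEFAULT_IOQUEUE_DEPTH == 32, a device whose command pool
	 * reports 256 entries yields mnt_ioqueue_depth = 256 and
	 * mnt_ioscale = (256 + 31) / 32 = 8, i.e. the cluster/throttle
	 * layers may keep roughly 8x the default number of I/Os in flight
	 * for this mount.
	 */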
	if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
		mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;

	if (features & DK_FEATURE_UNMAP) {
		mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;

		if (VNOP_IOCTL(devvp, _DKIOCCORESTORAGE, NULL, 0, ctx) == 0)
			mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
	}
#if CONFIG_IOSCHED
	if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
		mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
		throttle_info_disable_throttle(mp->mnt_devbsdunit);
	}
#endif /* CONFIG_IOSCHED */
	return (error);
}
static struct klist fs_klist;
lck_grp_t *fs_klist_lck_grp;
lck_mtx_t *fs_klist_lock;

void
vfs_event_init(void)
{
	klist_init(&fs_klist);
	fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL);
	fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL);
}

void
vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
{
	if (event == VQ_DEAD || event == VQ_NOTRESP) {
		struct mount *mp = vfs_getvfs(fsid);
		if (mp) {
			mount_lock_spin(mp);
			if (data)
				mp->mnt_kern_flag &= ~MNT_LNOTRESP;	// Now responding
			else
				mp->mnt_kern_flag |= MNT_LNOTRESP;	// Not responding
			mount_unlock(mp);
		}
	}

	lck_mtx_lock(fs_klist_lock);
	KNOTE(&fs_klist, event);
	lck_mtx_unlock(fs_klist_lock);
}
/*
 * return the number of mounted filesystems.
 */
static int
sysctl_vfs_getvfscnt(void)
{
	return (mount_getvfscnt());
}


int
mount_getvfscnt(void)
{
	int count;

	mount_list_lock();
	count = nummounts;
	mount_list_unlock();
	return (count);
}


int
mount_fillfsids(fsid_t *fsidlst, int count)
{
	struct mount *mp;
	int actual = 0;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (actual <= count) {
			fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
			actual++;
		}
	}
	mount_list_unlock();
	return (actual);
}

/*
 * fill in the array of fsid_t's up to a max of 'count', the actual
 * number filled in will be set in '*actual'.  If there are more fsid_t's
 * than room in fsidlst then ENOMEM will be returned and '*actual' will
 * have the actual count.
 * having *actual filled out even in the error case is depended upon.
 */
static int
sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual)
{
	struct mount *mp;

	*actual = 0;
	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		(*actual)++;
		if (*actual <= count)
			fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
	}
	mount_list_unlock();
	return (*actual <= count ? 0 : ENOMEM);
}
static int
sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
		__unused int arg2, struct sysctl_req *req)
{
	int actual, error;
	size_t space;
	fsid_t *fsidlst;

	/* This is a readonly node. */
	if (req->newptr != USER_ADDR_NULL)
		return (EPERM);

	/* they are querying us so just return the space required. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
		return 0;
	}
again:
	/*
	 * Retrieve an accurate count of the amount of space required to copy
	 * out all the fsids in the system.
	 */
	space = req->oldlen;
	req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);

	/* they didn't give us enough space. */
	if (space < req->oldlen)
		return (ENOMEM);

	MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
	if (fsidlst == NULL) {
		return (ENOMEM);
	}

	error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
	    &actual);
	/*
	 * If we get back ENOMEM, then another mount has been added while we
	 * slept in malloc above.  If this is the case then try again.
	 */
	if (error == ENOMEM) {
		FREE(fsidlst, M_TEMP);
		req->oldlen = space;
		goto again;
	}
	if (error == 0) {
		error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
	}
	FREE(fsidlst, M_TEMP);
	return (error);
}
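
/*
 * Illustrative user-space sketch (not part of the original source): the
 * vfs.generic.vfsidlist node declared further below is normally consumed
 * with the two-pass sysctl dance, first asking for the size and then for
 * the data:
 *
 *	size_t len = 0;
 *	sysctlbyname("vfs.generic.vfsidlist", NULL, &len, NULL, 0);
 *	fsid_t *ids = malloc(len);
 *	sysctlbyname("vfs.generic.vfsidlist", ids, &len, NULL, 0);
 *
 * Most callers use getfsstat(2) instead; this node backs tools that need
 * raw fsid_t values for the sysctl-by-fsid interface.
 */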
/*
 * Do a sysctl by fsid.
 */
static int
sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
		struct sysctl_req *req)
{
	union union_vfsidctl vc;
	mount_t mp;
	struct vfsstatfs *sp;
	int *name, flags, namelen;
	int error = 0, gotref = 0;
	vfs_context_t ctx = vfs_context_current();
	proc_t p = req->p;	/* XXX req->p != current_proc()? */
	boolean_t is_64_bit;

	name = arg1;
	namelen = arg2;
	is_64_bit = proc_is64bit(p);

	error = SYSCTL_IN(req, &vc, is_64_bit ? sizeof(vc.vc64) : sizeof(vc.vc32));
	if (error)
		goto out;
	if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
		error = EINVAL;
		goto out;
	}
	mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
	if (mp == NULL) {
		error = ENOENT;
		goto out;
	}
	gotref = 1;
	/* reset so that the fs specific code can fetch it. */
	req->newidx = 0;
	/*
	 * Note if this is a VFS_CTL then we pass the actual sysctl req
	 * in for "oldp" so that the lower layer can DTRT and use the
	 * SYSCTL_IN/OUT routines.
	 */
	if (mp->mnt_op->vfs_sysctl != NULL) {
		if (is_64_bit) {
			if (vfs_64bitready(mp)) {
				error = mp->mnt_op->vfs_sysctl(name, namelen,
				    CAST_USER_ADDR_T(req),
				    NULL, USER_ADDR_NULL, 0,
				    ctx);
			} else {
				error = ENOTSUP;
			}
		} else {
			error = mp->mnt_op->vfs_sysctl(name, namelen,
			    CAST_USER_ADDR_T(req),
			    NULL, USER_ADDR_NULL, 0,
			    ctx);
		}
		if (error != ENOTSUP) {
			goto out;
		}
	}
	switch (name[0]) {
	case VFS_CTL_UMOUNT:
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error)
			break;

		mount_ref(mp, 0);
		mount_iterdrop(mp);
		gotref = 0;
		/* safedounmount consumes a ref */
		error = safedounmount(mp, flags, ctx);
		break;
	case VFS_CTL_STATFS:
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error)
			break;

		sp = &mp->mnt_vfsstat;
		if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
		    (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT)))
			goto out;
		if (is_64_bit) {
			struct user64_statfs sfs;

			bzero(&sfs, sizeof(sfs));
			sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs.f_type = mp->mnt_vtable->vfc_typenum;
			sfs.f_bsize = (user64_long_t)sp->f_bsize;
			sfs.f_iosize = (user64_long_t)sp->f_iosize;
			sfs.f_blocks = (user64_long_t)sp->f_blocks;
			sfs.f_bfree = (user64_long_t)sp->f_bfree;
			sfs.f_bavail = (user64_long_t)sp->f_bavail;
			sfs.f_files = (user64_long_t)sp->f_files;
			sfs.f_ffree = (user64_long_t)sp->f_ffree;
			sfs.f_fsid = sp->f_fsid;
			sfs.f_owner = sp->f_owner;

			if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
				strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
			} else {
				strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
			}
			strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
		}
		else {
			struct user32_statfs sfs;

			bzero(&sfs, sizeof(sfs));
			sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs.f_type = mp->mnt_vtable->vfc_typenum;

			/*
			 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
			 * have to fudge the numbers here in that case.  We inflate the blocksize in order
			 * to reflect the filesystem size as best we can.
			 */
			if (sp->f_blocks > INT_MAX) {
				int shift;

				/*
				 * Work out how far we have to shift the block count down to make it fit.
				 * Note that it's possible to have to shift so far that the resulting
				 * blocksize would be unreportably large.  At that point, we will clip
				 * any values that don't fit.
				 *
				 * For safety's sake, we also ensure that f_iosize is never reported as
				 * being smaller than f_bsize.
				 */
				for (shift = 0; shift < 32; shift++) {
					if ((sp->f_blocks >> shift) <= INT_MAX)
						break;
					if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
						break;
				}
#define __SHIFT_OR_CLIP(x, s)	((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
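
				/*
				 * Illustrative worked example (not in the original source):
				 * for f_bsize = 4096 and f_blocks = 3 * 2^31 (about 24 TiB of
				 * data blocks), the loop above settles on shift = 2, so the
				 * 32-bit caller sees f_blocks = __SHIFT_OR_CLIP(3 * 2^31, 2)
				 * = 3 * 2^29 and f_bsize = 4096 << 2 = 16384; the product
				 * blocks * bsize, i.e. the reported volume size, is preserved
				 * even though the individual fields had to be rescaled.
				 */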
				sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
				sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
				sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
#undef __SHIFT_OR_CLIP
				sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift);
				sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize);
			} else {
				sfs.f_bsize = (user32_long_t)sp->f_bsize;
				sfs.f_iosize = (user32_long_t)sp->f_iosize;
				sfs.f_blocks = (user32_long_t)sp->f_blocks;
				sfs.f_bfree = (user32_long_t)sp->f_bfree;
				sfs.f_bavail = (user32_long_t)sp->f_bavail;
			}
			sfs.f_files = (user32_long_t)sp->f_files;
			sfs.f_ffree = (user32_long_t)sp->f_ffree;
			sfs.f_fsid = sp->f_fsid;
			sfs.f_owner = sp->f_owner;

			if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
				strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
			} else {
				strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN);
			}
			strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
		}
		break;

	default:
		error = ENOTSUP;
		break;
	}
out:
	if (gotref != 0)
		mount_iterdrop(mp);
	return (error);
}
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fsevent(struct knote *kn, long hint);
struct filterops fs_filtops = {
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent,
};

static int
filt_fsattach(struct knote *kn)
{
	lck_mtx_lock(fs_klist_lock);
	kn->kn_flags |= EV_CLEAR;
	KNOTE_ATTACH(&fs_klist, kn);
	lck_mtx_unlock(fs_klist_lock);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{
	lck_mtx_lock(fs_klist_lock);
	KNOTE_DETACH(&fs_klist, kn);
	lck_mtx_unlock(fs_klist_lock);
}

static int
filt_fsevent(struct knote *kn, long hint)
{
	/*
	 * Backwards compatibility:
	 * Other filters would do nothing if kn->kn_sfflags == 0
	 */
	if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
		kn->kn_fflags |= hint;
	}

	return (kn->kn_fflags != 0);
}
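
/*
 * Illustrative user-space sketch (not part of the original source): these
 * filterops back EVFILT_FS, which a process can watch via kqueue(2):
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *
 *	EV_SET(&ev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	// subsequent kevent() returns carry VQ_* bits (VQ_MOUNT, VQ_UNMOUNT,
 *	// VQ_NOTRESP, ...) in fflags, as posted by vfs_event_signal() above.
 */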
static int
sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
		__unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int out, error;
	pid_t pid;
	proc_t p;

	/* We need a pid. */
	if (req->newptr == USER_ADDR_NULL)
		return (EINVAL);

	error = SYSCTL_IN(req, &pid, sizeof(pid));
	if (error)
		return (error);

	p = proc_find(pid < 0 ? -pid : pid);
	if (p == NULL)
		return (ESRCH);

	/*
	 * Fetching the value is ok, but we only fetch if the old
	 * pointer is given.
	 */
	if (req->oldptr != USER_ADDR_NULL) {
		out = !((p->p_flag & P_NOREMOTEHANG) == 0);
		proc_rele(p);
		error = SYSCTL_OUT(req, &out, sizeof(out));
		return (error);
	}

	/* cansignal offers us enough security. */
	if (p != req->p && proc_suser(req->p) != 0) {
		proc_rele(p);
		return (EPERM);
	}

	if (pid < 0)
		OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
	else
		OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
	proc_rele(p);

	return (0);
}
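
/*
 * Illustrative user-space sketch (not part of the original source): a
 * launcher opts a process in or out of the "no remote hang" behaviour by
 * writing its pid to the node (positive to set, negative to clear):
 *
 *	pid_t pid = target_pid;		// or -target_pid to clear the flag
 *	sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid));
 */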
static int
sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
{
	int *name, namelen;
	struct vfstable *vfsp;
	struct vfsconf vfsc;

	(void)oidp;
	name = arg1;
	namelen = arg2;

	if (namelen < 1) {
		return (EISDIR);
	} else if (namelen > 1) {
		return (ENOTDIR);
	}

	mount_list_lock();
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum == name[0])
			break;

	if (vfsp == NULL) {
		mount_list_unlock();
		return (ENOTSUP);
	}

	vfsc.vfc_reserved1 = 0;
	bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
	vfsc.vfc_typenum = vfsp->vfc_typenum;
	vfsc.vfc_refcount = vfsp->vfc_refcount;
	vfsc.vfc_flags = vfsp->vfc_flags;
	vfsc.vfc_reserved2 = 0;
	vfsc.vfc_reserved3 = 0;

	mount_list_unlock();
	return (SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf)));
}
/* the vfs.generic. branch. */
SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
/* retrieve a list of mounted filesystem fsid_t */
SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
/* perform operations on filesystem via fsid_t */
SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
    sysctl_vfs_ctlbyfsid, "ctlbyfsid");
SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
    NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxvfstypenum, 0, "");
SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout, 0, "");
SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_vfs_generic_conf, "");
long num_reusedvnodes = 0;


static vnode_t
process_vp(vnode_t vp, int want_vp, int *deferred)
{
	unsigned int vpid;

	*deferred = 0;

	vpid = vp->v_id;

	vnode_list_remove_locked(vp);

	vnode_list_unlock();

	vnode_lock_spin(vp);

	/*
	 * We could wait for the vnode_lock after removing the vp from the freelist
	 * and the vid is bumped only at the very end of reclaim. So it is possible
	 * that we are looking at a vnode that is being terminated. If so skip it.
	 */
	if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
	    VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
		/*
		 * we lost the race between dropping the list lock
		 * and picking up the vnode_lock... someone else
		 * used this vnode and it is now in a new state
		 */
		vnode_unlock(vp);

		return (NULLVP);
	}
	if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
		/*
		 * we did a vnode_rele_ext that asked for
		 * us not to reenter the filesystem during
		 * the release even though VL_NEEDINACTIVE was
		 * set... we'll do it here by doing a
		 * vnode_get/vnode_put
		 *
		 * pick up an iocount so that we can call
		 * vnode_put and drive the VNOP_INACTIVE...
		 * vnode_put will either leave us off
		 * the freelist if a new ref comes in,
		 * or put us back on the end of the freelist
		 * or recycle us if we were marked for termination...
		 * so we'll just go grab a new candidate
		 */
		vp->v_iocount++;
		vnode_put_locked(vp);
		vnode_unlock(vp);

		return (NULLVP);
	}
	/*
	 * Checks for anyone racing us for recycle
	 */
	if (vp->v_type != VBAD) {
		if (want_vp && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
			vnode_async_list_add(vp);
			vnode_unlock(vp);

			*deferred = 1;

			return (NULLVP);
		}
		if (vp->v_lflag & VL_DEAD)
			panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);

		vnode_lock_convert(vp);
		(void)vnode_reclaim_internal(vp, 1, want_vp, 0);

		if (want_vp) {
			if ((VONLIST(vp)))
				panic("new_vnode(%p): vp on list", vp);
			if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
			    (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
				panic("new_vnode(%p): free vnode still referenced", vp);
			if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
				panic("new_vnode(%p): vnode seems to be on mount list", vp);
			if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
				panic("new_vnode(%p): vnode still hooked into the name cache", vp);
		} else {
			vnode_unlock(vp);
			vp = NULLVP;
		}
	}
	return (vp);
}
static void
async_work_continue(void)
{
	struct async_work_lst *q;
	int deferred;
	vnode_t vp;

	q = &vnode_async_work_list;

	for (;;) {
		vnode_list_lock();

		if ( TAILQ_EMPTY(q) ) {
			assert_wait(q, (THREAD_UNINT));

			vnode_list_unlock();

			thread_block((thread_continue_t)async_work_continue);

			continue;
		}
		async_work_handled++;

		vp = TAILQ_FIRST(q);

		vp = process_vp(vp, 0, &deferred);

		if (vp != NULLVP)
			panic("found VBAD vp (%p) on async queue", vp);
	}
}
static int
new_vnode(vnode_t *vpp)
{
	vnode_t	vp;
	uint32_t retries = 0, max_retries = 100;	/* retry in case of tablefull */
	int force_alloc = 0, walk_count = 0;
	boolean_t need_reliable_vp = FALSE;
	int deferred;
	struct timeval initial_tv;
	struct timeval current_tv;
	proc_t curproc = current_proc();

	initial_tv.tv_sec = 0;
retry:
	vp = NULLVP;

	vnode_list_lock();

	if (need_reliable_vp == TRUE)
		async_work_timed_out++;

	if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) {
		struct timespec ts;

		if ( !TAILQ_EMPTY(&vnode_dead_list)) {
			/*
			 * Can always reuse a dead one
			 */
			vp = TAILQ_FIRST(&vnode_dead_list);
			goto steal_this_vp;
		}
		/*
		 * no dead vnodes available... if we're under
		 * the limit, we'll create a new vnode
		 */
		numvnodes++;
		vnode_list_unlock();

		MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof(*vp));
		VLISTNONE(vp);		/* avoid double queue removal */
		lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);

		klist_init(&vp->v_knotes);
		nanouptime(&ts);
		vp->v_id = ts.tv_nsec;
		vp->v_flag = VSTANDARD;

		if (mac_vnode_label_init_needed(vp))
			mac_vnode_label_init(vp);

		vp->v_iocount = 1;
		goto done;
	}
	microuptime(&current_tv);

#define MAX_WALK_COUNT 1000

	if ( !TAILQ_EMPTY(&vnode_rage_list) &&
	     (ragevnodes >= rage_limit ||
	      (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {

		TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
			if ( !(vp->v_listflag & VLIST_RAGE))
				panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);

			// if we're a dependency-capable process, skip vnodes that can
			// cause recycling deadlocks. (i.e. this process is diskimages
			// helper and the vnode is in a disk image).  Querying the
			// mnt_kern_flag for the mount's virtual device status
			// is safer than checking the mnt_dependent_process, which
			// may not be updated if there are multiple devnode layers
			// in between the disk image and the final consumer.

			if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
			    (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
				/*
				 * if need_reliable_vp == TRUE, then we've already sent one or more
				 * non-reliable vnodes to the async thread for processing and timed
				 * out waiting for a dead vnode to show up.  Use the MAX_WALK_COUNT
				 * mechanism to first scan for a reliable vnode before forcing
				 * a new vnode to be created
				 */
				if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE)
					break;
			}

			// don't iterate more than MAX_WALK_COUNT vnodes to
			// avoid keeping the vnode list lock held for too long.

			if (walk_count++ > MAX_WALK_COUNT) {
				vp = NULLVP;
				break;
			}
		}
	}

	if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
		/*
		 * Pick the first vp for possible reuse
		 */
		walk_count = 0;
		TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {

			// if we're a dependency-capable process, skip vnodes that can
			// cause recycling deadlocks. (i.e. this process is diskimages
			// helper and the vnode is in a disk image).  Querying the
			// mnt_kern_flag for the mount's virtual device status
			// is safer than checking the mnt_dependent_process, which
			// may not be updated if there are multiple devnode layers
			// in between the disk image and the final consumer.

			if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
			    (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
				/*
				 * if need_reliable_vp == TRUE, then we've already sent one or more
				 * non-reliable vnodes to the async thread for processing and timed
				 * out waiting for a dead vnode to show up.  Use the MAX_WALK_COUNT
				 * mechanism to first scan for a reliable vnode before forcing
				 * a new vnode to be created
				 */
				if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE)
					break;
			}

			// don't iterate more than MAX_WALK_COUNT vnodes to
			// avoid keeping the vnode list lock held for too long.

			if (walk_count++ > MAX_WALK_COUNT) {
				vp = NULLVP;
				break;
			}
		}
	}
	//
	// if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
	// then we're trying to create a vnode on behalf of a
	// process like diskimages-helper that has file systems
	// mounted on top of itself (and thus we can't reclaim
	// vnodes in the file systems on top of us).  if we can't
	// find a vnode to reclaim then we'll just have to force
	// the allocation.
	//
	if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
		force_alloc = 1;
		vnode_list_unlock();
		goto retry;
	}

	if (vp == NULL) {
		/*
		 * we've reached the system imposed maximum number of vnodes
		 * but there isn't a single one available
		 * wait a bit and then retry... if we can't get a vnode
		 * after our target number of retries, then log a complaint
		 */
		if (++retries <= max_retries) {
			vnode_list_unlock();
			delay_for_interval(1, 1000 * 1000);
			goto retry;
		}

		vnode_list_unlock();

		log(LOG_EMERG, "%d desired, %d numvnodes, "
			"%d free, %d dead, %d rage\n",
			desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
		/*
		 * Running out of vnodes tends to make a system unusable. Start killing
		 * processes that jetsam knows are killable.
		 */
		if (memorystatus_kill_on_vnode_limit() == FALSE) {
			/*
			 * If jetsam can't find any more processes to kill and there
			 * still aren't any free vnodes, panic. Hopefully we'll get a
			 * panic log to tell us why we ran out.
			 */
			panic("vnode table is full\n");
		}

		/*
		 * Now that we've killed someone, wait a bit and continue looking
		 * (with fewer retries before trying another kill).
		 */
		delay_for_interval(3, 1000 * 1000);
		retries = 0;
		max_retries--;
		goto retry;
	}
steal_this_vp:
	if ((vp = process_vp(vp, 1, &deferred)) == NULLVP) {
		if (deferred) {
			int elapsed_msecs;
			struct timeval elapsed_tv;

			if (initial_tv.tv_sec == 0)
				microuptime(&initial_tv);

			vnode_list_lock();

			dead_vnode_waited++;
			dead_vnode_wanted++;

			/*
			 * note that we're only going to explicitly wait 10ms
			 * for a dead vnode to become available, since even if one
			 * isn't available, a reliable vnode might now be available
			 * at the head of the VRAGE or free lists... if so, we
			 * can satisfy the new_vnode request with less latency than waiting
			 * for the full 100ms duration we're ultimately willing to tolerate
			 */
			assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);

			vnode_list_unlock();

			thread_block(THREAD_CONTINUE_NULL);

			microuptime(&elapsed_tv);

			timevalsub(&elapsed_tv, &initial_tv);
			elapsed_msecs = elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000;

			if (elapsed_msecs >= 100) {
				/*
				 * we've waited long enough... 100ms is
				 * somewhat arbitrary for this case, but the
				 * normal worst case latency used for UI
				 * interaction is 100ms, so I've chosen to
				 * go with that.
				 *
				 * setting need_reliable_vp to TRUE
				 * forces us to find a reliable vnode
				 * that we can process synchronously, or
				 * to create a new one if the scan for
				 * a reliable one hits the scan limit
				 */
				need_reliable_vp = TRUE;
			}
		}
		goto retry;
	}
	OSAddAtomicLong(1, &num_reusedvnodes);

	/*
	 * We should never see VL_LABELWAIT or VL_LABEL here,
	 * as those operations hold a reference.
	 */
	assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
	assert((vp->v_lflag & VL_LABEL) != VL_LABEL);
	if (vp->v_lflag & VL_LABELED) {
		vnode_lock_convert(vp);
		mac_vnode_label_recycle(vp);
	} else if (mac_vnode_label_init_needed(vp)) {
		vnode_lock_convert(vp);
		mac_vnode_label_init(vp);
	}

	vp->v_iocount = 1;
	vp->v_lflag = 0;
	vp->v_writecount = 0;
	vp->v_references = 0;
	vp->v_iterblkflags = 0;
	vp->v_flag = VSTANDARD;
	/* vbad vnodes can point to dead_mountp */
	vp->v_mount = NULL;
	vp->v_defer_reclaimlist = (vnode_t)0;

	vnode_unlock(vp);

done:
	*vpp = vp;

	return (0);
}
void
vnode_lock(vnode_t vp)
{
	lck_mtx_lock(&vp->v_lock);
}

void
vnode_lock_spin(vnode_t vp)
{
	lck_mtx_lock_spin(&vp->v_lock);
}

void
vnode_unlock(vnode_t vp)
{
	lck_mtx_unlock(&vp->v_lock);
}
int
vnode_get(struct vnode *vp)
{
	int retval;

	vnode_lock_spin(vp);
	retval = vnode_get_locked(vp);
	vnode_unlock(vp);

	return (retval);
}

int
vnode_get_locked(struct vnode *vp)
{
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

	if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		return (ENOENT);
	}
	vp->v_iocount++;
	return (0);
}

/*
 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
 * while the vnode is draining, but at no point after that) to prevent
 * deadlocks when getting vnodes from filesystem hashes while holding
 * resources that may prevent other iocounts from being released.
 */
int
vnode_getwithvid(vnode_t vp, uint32_t vid)
{
	return (vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO )));
}

/*
 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
 * drain; it exists for use in the VFS name cache, where we really do want to block behind
 * vnode drain to prevent holding off an unmount.
 */
int
vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
{
	return (vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID )));
}
int
vnode_getwithref(vnode_t vp)
{
	return (vget_internal(vp, 0, 0));
}


__private_extern__ int
vnode_getalways(vnode_t vp)
{
	return (vget_internal(vp, 0, VNODE_ALWAYS));
}
int
vnode_put(vnode_t vp)
{
	int retval;

	vnode_lock_spin(vp);
	retval = vnode_put_locked(vp);
	vnode_unlock(vp);

	return (retval);
}

int
vnode_put_locked(vnode_t vp)
{
	vfs_context_t ctx = vfs_context_current();	/* hoist outside loop */

	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

retry:
	if (vp->v_iocount < 1)
		panic("vnode_put(%p): iocount < 1", vp);

	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		vnode_dropiocount(vp);
		return (0);
	}
	if ((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {

		vp->v_lflag &= ~VL_NEEDINACTIVE;
		vnode_unlock(vp);

		VNOP_INACTIVE(vp, ctx);

		vnode_lock_spin(vp);
		/*
		 * because we had to drop the vnode lock before calling
		 * VNOP_INACTIVE, the state of this vnode may have changed...
		 * we may pick up both VL_MARKTERM and either
		 * an iocount or a usecount while in the VNOP_INACTIVE call
		 * we don't want to call vnode_reclaim_internal on a vnode
		 * that has active references on it... so loop back around
		 * and reevaluate the state
		 */
		goto retry;
	}
	vp->v_lflag &= ~VL_NEEDINACTIVE;

	if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);

	return (0);
}
/* is vnode_t in use by others? */
int
vnode_isinuse(vnode_t vp, int refcnt)
{
	return (vnode_isinuse_locked(vp, refcnt, 0));
}


static int
vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock_spin(vp);
	if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
		retval = 1;
		goto out;
	}
	if (vp->v_type == VREG) {
		retval = ubc_isinuse_locked(vp, refcnt, 1);
	}
out:
	if (!locked)
		vnode_unlock(vp);
	return (retval);
}
/* resume vnode_t */
errno_t
vnode_resume(vnode_t vp)
{
	if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {

		vnode_lock_spin(vp);
		vp->v_lflag &= ~VL_SUSPENDED;
		vp->v_owner = NULL;
		vnode_unlock(vp);

		wakeup(&vp->v_iocount);
	}
	return (0);
}

/* suspend vnode_t
 * Please do not use on more than one vnode at a time as it may
 * cause deadlocks.
 * xxx should we explicitly prevent this from happening?
 */

errno_t
vnode_suspend(vnode_t vp)
{
	if (vp->v_lflag & VL_SUSPENDED) {
		return (EBUSY);
	}

	vnode_lock_spin(vp);

	/*
	 * xxx is this sufficient to check if a vnode_drain is
	 * progress?
	 */
	if (vp->v_owner == NULL) {
		vp->v_lflag |= VL_SUSPENDED;
		vp->v_owner = current_thread();
	}
	vnode_unlock(vp);

	return (0);
}
/*
 * Release any blocked locking requests on the vnode.
 * Used for forced-unmounts.
 *
 * XXX	What about network filesystems?
 */
static void
vnode_abort_advlocks(vnode_t vp)
{
	if (vp->v_flag & VLOCKLOCAL)
		lf_abort_advlocks(vp);
}


static errno_t
vnode_drain(vnode_t vp)
{
	if (vp->v_lflag & VL_DRAIN) {
		panic("vnode_drain: recursive drain");
		return (ENOENT);
	}
	vp->v_lflag |= VL_DRAIN;
	vp->v_owner = current_thread();

	while (vp->v_iocount > 1)
		msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);

	vp->v_lflag &= ~VL_DRAIN;

	return (0);
}
/*
 * if the number of recent references via vnode_getwithvid or vnode_getwithref
 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
 * the LRU list if it's currently on it... once the iocount and usecount both drop
 * to 0, it will get put back on the end of the list, effectively making it younger
 * this allows us to keep actively referenced vnodes in the list without having
 * to constantly remove and add to the list each time a vnode w/o a usecount is
 * referenced which costs us taking and dropping a global lock twice.
 * However, if the vnode is marked DIRTY, we want to pull it out much earlier
 */
#define UNAGE_THRESHHOLD	25
#define UNAGE_DIRTYTHRESHHOLD	 6
static errno_t
vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
{
	int nodead = vflags & VNODE_NODEAD;
	int nosusp = vflags & VNODE_NOSUSPEND;
	int always = vflags & VNODE_ALWAYS;
	int beatdrain = vflags & VNODE_DRAINO;
	int withvid = vflags & VNODE_WITHID;

	for (;;) {
		/*
		 * if it is a dead vnode with deadfs
		 */
		if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
			return (ENOENT);
		}
		/*
		 * will return VL_DEAD ones
		 */
		if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
			break;
		}
		/*
		 * if suspended vnodes are to be failed
		 */
		if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
			return (ENOENT);
		}
		/*
		 * if you are the owner of drain/suspend/termination, can acquire iocount
		 * check for VL_TERMINATE; it does not set owner
		 */
		if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
		    (vp->v_owner == current_thread())) {
			break;
		}

		if (always != 0)
			break;

		/*
		 * If this vnode is getting drained, there are some cases where
		 * we can't block.
		 */
		if (vp->v_lflag & VL_DRAIN) {
			/*
			 * In some situations, we want to get an iocount
			 * even if the vnode is draining to prevent deadlock,
			 * e.g. if we're in the filesystem, potentially holding
			 * resources that could prevent other iocounts from
			 * being released.
			 */
			if (beatdrain)
				break;
			/*
			 * Don't block if the vnode's mount point is unmounting as
			 * we may be the thread the unmount is itself waiting on.
			 * Only callers who pass in vids (at this point, we've already
			 * handled nosusp and nodead) are expecting error returns
			 * from this function, so we can only return errors for
			 * those.  ENODEV is intended to inform callers that the call
			 * failed because an unmount is in progress.
			 */
			if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount))
				return (ENODEV);
		}

		vnode_lock_convert(vp);

		if (vp->v_lflag & VL_TERMINATE) {
			vp->v_lflag |= VL_TERMWANT;

			msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
		} else
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
	}
	if (withvid && vid != vp->v_id) {
		return (ENOENT);
	}
	if (++vp->v_references >= UNAGE_THRESHHOLD ||
	    (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD)) {
		vp->v_references = 0;
		vnode_list_remove(vp);
	}
	vp->v_iocount++;

	return (0);
}
static void
vnode_dropiocount(vnode_t vp)
{
	if (vp->v_iocount < 1)
		panic("vnode_dropiocount(%p): v_iocount < 1", vp);

	vp->v_iocount--;

	if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
		wakeup(&vp->v_iocount);
}
void
vnode_reclaim(struct vnode *vp)
{
	vnode_reclaim_internal(vp, 0, 0, 0);
}

__private_extern__
void
vnode_reclaim_internal(struct vnode *vp, int locked, int reuse, int flags)
{
	int isfifo = 0;

	if (!locked)
		vnode_lock(vp);

	if (vp->v_lflag & VL_TERMINATE) {
		panic("vnode reclaim in progress");
	}
	vp->v_lflag |= VL_TERMINATE;

	vn_clearunionwait(vp, 1);

	vnode_drain(vp);

	isfifo = (vp->v_type == VFIFO);

	if (vp->v_type != VBAD)
		vgone(vp, flags);		/* clean and reclaim the vnode */

	/*
	 * give the vnode a new identity so that vnode_getwithvid will fail
	 * on any stale cache accesses...
	 * grab the list_lock so that if we're in "new_vnode"
	 * behind the list_lock trying to steal this vnode, the v_id is stable...
	 * once new_vnode drops the list_lock, it will block trying to take
	 * the vnode lock until we release it... at that point it will evaluate
	 * whether the v_vid has changed
	 * also need to make sure that the vnode isn't on a list where "new_vnode"
	 * can find it after the v_id has been bumped until we are completely done
	 * with the vnode (i.e. putting it back on a list has to be the very last
	 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
	 * are holding an io_count on the vnode... they need to drop the io_count
	 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
	 * they are completely done with the vnode
	 */
	vnode_list_lock();

	vnode_list_remove_locked(vp);
	vp->v_id++;

	vnode_list_unlock();

	if (isfifo) {
		struct fifoinfo *fip;

		fip = vp->v_fifoinfo;
		vp->v_fifoinfo = NULL;
		FREE(fip, M_TEMP);
	}
	vp->v_type = VBAD;

	if (vp->v_data)
		panic("vnode_reclaim_internal: cleaned vnode isn't");
	if (vp->v_numoutput)
		panic("vnode_reclaim_internal: clean vnode has pending I/O's");
	if (UBCINFOEXISTS(vp))
		panic("vnode_reclaim_internal: ubcinfo not cleaned");
	if (vp->v_parent)
		panic("vnode_reclaim_internal: vparent not removed");
	if (vp->v_name)
		panic("vnode_reclaim_internal: vname not removed");

	vp->v_socket = NULL;

	vp->v_lflag &= ~VL_TERMINATE;
	vp->v_owner = NULL;

	KNOTE(&vp->v_knotes, NOTE_REVOKE);

	/* Make sure that when we reuse the vnode, no knotes left over */
	klist_init(&vp->v_knotes);

	if (vp->v_lflag & VL_TERMWANT) {
		vp->v_lflag &= ~VL_TERMWANT;
		wakeup(&vp->v_lflag);
	}
	if (!reuse) {
		/*
		 * make sure we get on the
		 * dead list if appropriate
		 */
		vnode_list_add(vp);
	}
	if (!locked)
		vnode_unlock(vp);
}
4631 * dead list if appropriate
4640 * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
4641 * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
4642 * is obsoleted by this.
4645 vnode_create(uint32_t flavor
, uint32_t size
, void *data
, vnode_t
*vpp
)
4653 struct componentname
*cnp
;
4654 struct vnode_fsparam
*param
= (struct vnode_fsparam
*)data
;
4656 struct vnode_trigger_param
*tinfo
= NULL
;
4661 /* Do quick sanity check on the parameters. */
4662 if (param
->vnfs_vtype
== VBAD
) {
4667 if ((flavor
== VNCREATE_TRIGGER
) && (size
== VNCREATE_TRIGGER_SIZE
)) {
4668 tinfo
= (struct vnode_trigger_param
*)data
;
4670 /* Validate trigger vnode input */
4671 if ((param
->vnfs_vtype
!= VDIR
) ||
4672 (tinfo
->vnt_resolve_func
== NULL
) ||
4673 (tinfo
->vnt_flags
& ~VNT_VALID_MASK
)) {
4676 /* Fall through a normal create (params will be the same) */
4677 flavor
= VNCREATE_FLAVOR
;
4681 if ((flavor
!= VNCREATE_FLAVOR
) || (size
!= VCREATESIZE
))
4684 if ( (error
= new_vnode(&vp
)) )
	dvp = param->vnfs_dvp;
	cnp = param->vnfs_cnp;

	vp->v_op = param->vnfs_vops;
	vp->v_type = param->vnfs_vtype;
	vp->v_data = param->vnfs_fsnode;

	if (param->vnfs_markroot)
		vp->v_flag |= VROOT;
	if (param->vnfs_marksystem)
		vp->v_flag |= VSYSTEM;

	if (vp->v_type == VREG) {
		error = ubc_info_init_withsize(vp, param->vnfs_filesize);
		if (error) {
			vp->v_op = dead_vnodeop_p;
			vp->v_lflag |= VL_DEAD;

			vnode_put(vp);
			return (error);
		}
		if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)
			memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
	}

	/*
	 * For trigger vnodes, attach trigger info to vnode
	 */
	if ((vp->v_type == VDIR) && (tinfo != NULL)) {
		/*
		 * Note: has a side effect of incrementing trigger count on the
		 * mount if successful, which we would need to undo on a
		 * subsequent failure.
		 */
		error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
		if (error) {
			printf("vnode_create: vnode_resolver_create() err %d\n", error);

			vp->v_op = dead_vnodeop_p;
			vp->v_lflag |= VL_DEAD;

			vnode_put(vp);
			return (error);
		}
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK) {

		vp->v_tag = VT_DEVFS;		/* callers will reset if needed (bdevvp) */

		if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
			/*
			 * if checkalias returns a vnode, it will be locked
			 *
			 * first get rid of the unneeded vnode we acquired
			 */
			vp->v_data = NULL;
			vp->v_op = spec_vnodeop_p;
			vp->v_type = VBAD;
			vp->v_lflag = VL_DEAD;

			vnode_put(vp);

			/*
			 * switch to aliased vnode and finish
			 * preparing it
			 */
			vp = nvp;

			vp->v_op = param->vnfs_vops;
			vp->v_type = param->vnfs_vtype;
			vp->v_data = param->vnfs_fsnode;
			vp->v_lflag = 0;

			insmntque(vp, param->vnfs_mp);

			vnode_unlock(vp);
		}

		if (VCHR == vp->v_type) {
			u_int maj = major(vp->v_rdev);

			if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY)
				vp->v_flag |= VISTTY;
		}
	}

	if (vp->v_type == VFIFO) {
		struct fifoinfo *fip;

		MALLOC(fip, struct fifoinfo *,
			sizeof(*fip), M_TEMP, M_WAITOK);
		bzero(fip, sizeof(struct fifoinfo));
		vp->v_fifoinfo = fip;
	}
	/* The file systems must pass the address of the location where
	 * they store the vnode pointer. When we add the vnode into the mount
	 * list and name cache they become discoverable. So the file system node
	 * must have the connection to vnode setup by then
	 */
	*vpp = vp;

	/* Add fs named reference. */
	if (param->vnfs_flags & VNFS_ADDFSREF) {
		vp->v_lflag |= VNAMED_FSHASH;
	}
	if (param->vnfs_mp) {
		if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)
			vp->v_flag |= VLOCKLOCAL;

		if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
			panic("insmntque: vp on the free list\n");

		/*
		 * enter in mount vnode list
		 */
		insmntque(vp, param->vnfs_mp);
	}
	if (dvp && vnode_ref(dvp) == 0) {
		/* ... */
	}
	if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
		/*
		 * enter into name cache
		 * we've got the info to enter it into the name cache now
		 * cache_enter_create will pick up an extra reference on
		 * the name entered into the string cache
		 */
		vp->v_name = cache_enter_create(dvp, vp, cnp);
	} else
		vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);

	if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED)
		vp->v_flag |= VISUNION;

	if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
		/*
		 * this vnode is being created as cacheable in the name cache
		 * this allows us to re-enter it in the cache
		 */
		vp->v_flag |= VNCACHEABLE;
	}
	ut = get_bsdthread_info(current_thread());

	if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
	    (ut->uu_flag & UT_RAGE_VNODES)) {
		/*
		 * process has indicated that it wants any
		 * vnodes created on its behalf to be rapidly
		 * aged to reduce the impact on the cached set
		 * of vnodes
		 */
		vp->v_flag |= VRAGE;
	}
	return (0);
}
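
/*
 * Illustrative file-system-side sketch (not part of the original source):
 * a typical caller fills a vnode_fsparam and asks vnode_create() for a
 * vnode with an iocount held.  Names like "myfs_mp", "myfs_node" and
 * "myfs_vnodeops" below are hypothetical placeholders for the caller's
 * own mount, per-file node and vnop table.
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = myfs_mp;
 *	vfsp.vnfs_vtype = VREG;
 *	vfsp.vnfs_str = "myfs";
 *	vfsp.vnfs_dvp = dvp;                    // parent, for the name cache
 *	vfsp.vnfs_fsnode = myfs_node;           // per-file private data
 *	vfsp.vnfs_vops = myfs_vnodeops;
 *	vfsp.vnfs_cnp = cnp;
 *	vfsp.vnfs_filesize = myfs_node_size;
 *	vfsp.vnfs_flags = VNFS_ADDFSREF;        // keep a named fs reference
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 */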
int
vnode_addfsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if (vp->v_lflag & VNAMED_FSHASH)
		panic("add_fsref: vp already has named reference");
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
		panic("addfsref: vp on the free list\n");
	vp->v_lflag |= VNAMED_FSHASH;
	vnode_unlock(vp);
	return (0);
}

int
vnode_removefsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if ((vp->v_lflag & VNAMED_FSHASH) == 0)
		panic("remove_fsref: no named reference");
	vp->v_lflag &= ~VNAMED_FSHASH;
	vnode_unlock(vp);
	return (0);
}
int
vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
{
	mount_t mp;
	int ret = 0;
	fsid_t *fsid_list;
	int count, actualcount, i;
	void *allocmem;
	int indx_start, indx_stop, indx_incr;
	int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);

	count = mount_getvfscnt();
	count += 10;

	fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t));
	allocmem = (void *)fsid_list;

	actualcount = mount_fillfsids(fsid_list, count);

	/*
	 * Establish the iteration direction
	 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
	 */
	if (flags & VFS_ITERATE_TAIL_FIRST) {
		indx_start = actualcount - 1;
		indx_stop = -1;
		indx_incr = -1;
	} else /* Head first by default */ {
		indx_start = 0;
		indx_stop = actualcount;
		indx_incr = 1;
	}

	for (i = indx_start; i != indx_stop; i += indx_incr) {

		/* obtain the mount point with iteration reference */
		mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);

		if (mp == (struct mount *)0)
			continue;
		mount_lock(mp);
		if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
			mount_unlock(mp);
			mount_iterdrop(mp);
			continue;
		}
		mount_unlock(mp);

		/* iterate over all the vnodes */
		ret = callout(mp, arg);

		/*
		 * Drop the iterref here if the callback didn't do it.
		 * Note: If cb_dropref is set the mp may no longer exist.
		 */
		if (!cb_dropref)
			mount_iterdrop(mp);

		switch (ret) {
		case VFS_RETURNED:
		case VFS_RETURNED_DONE:
			if (ret == VFS_RETURNED_DONE) {
				mp = 0;
				break;
			}
			break;

		case VFS_CLAIMED_DONE:
			mp = 0;
			break;
		case VFS_CLAIMED:
		default:
			break;
		}
		if (mp == 0)
			break;
	}
	kfree(allocmem, (count * sizeof(fsid_t)));
	return (ret);
}
/*
 * Update the vfsstatfs structure in the mountpoint.
 * MAC: Parameter eventtype added, indicating whether the event that
 * triggered this update came from user space, via a system call
 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
 */
int
vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
{
	struct vfs_attr va;
	int error;

	/*
	 * Request the attributes we want to propagate into
	 * the per-mount vfsstat structure.
	 */
	VFSATTR_INIT(&va);
	VFSATTR_WANTED(&va, f_iosize);
	VFSATTR_WANTED(&va, f_blocks);
	VFSATTR_WANTED(&va, f_bfree);
	VFSATTR_WANTED(&va, f_bavail);
	VFSATTR_WANTED(&va, f_bused);
	VFSATTR_WANTED(&va, f_files);
	VFSATTR_WANTED(&va, f_ffree);
	VFSATTR_WANTED(&va, f_bsize);
	VFSATTR_WANTED(&va, f_fssubtype);

	if (eventtype == VFS_USER_EVENT) {
		error = mac_mount_check_getattr(ctx, mp, &va);
		if (error != 0)
			return (error);
	}

	if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
		KAUTH_DEBUG("STAT - filesystem returned error %d", error);
		return (error);
	}

	/*
	 * Unpack into the per-mount structure.
	 *
	 * We only overwrite these fields, which are likely to change:
	 *	f_blocks
	 *	f_bfree
	 *	f_bavail
	 *	f_bused
	 *	f_files
	 *	f_ffree
	 *
	 * And these which are not, but which the FS has no other way
	 * of providing to us:
	 *	f_bsize
	 *	f_iosize
	 *	f_fssubtype
	 */
	if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
		/* 4822056 - protect against malformed server mount */
		mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
	} else {
		mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize;	/* default from the device block size */
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
		mp->mnt_vfsstat.f_iosize = va.f_iosize;
	} else {
		mp->mnt_vfsstat.f_iosize = 1024 * 1024;		/* 1MB sensible I/O size */
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_blocks))
		mp->mnt_vfsstat.f_blocks = va.f_blocks;
	if (VFSATTR_IS_SUPPORTED(&va, f_bfree))
		mp->mnt_vfsstat.f_bfree = va.f_bfree;
	if (VFSATTR_IS_SUPPORTED(&va, f_bavail))
		mp->mnt_vfsstat.f_bavail = va.f_bavail;
	if (VFSATTR_IS_SUPPORTED(&va, f_bused))
		mp->mnt_vfsstat.f_bused = va.f_bused;
	if (VFSATTR_IS_SUPPORTED(&va, f_files))
		mp->mnt_vfsstat.f_files = va.f_files;
	if (VFSATTR_IS_SUPPORTED(&va, f_ffree))
		mp->mnt_vfsstat.f_ffree = va.f_ffree;

	/* this is unlikely to change, but has to be queried for */
	if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype))
		mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;

	return (0);
}
int
mount_list_add(mount_t mp)
{
	int res;

	mount_list_lock();
	if (system_inshutdown != 0) {
		res = -1;
	} else {
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
		nummounts++;
		res = 0;
	}
	mount_list_unlock();

	return res;
}

void
mount_list_remove(mount_t mp)
{
	mount_list_lock();
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	nummounts--;
	mp->mnt_list.tqe_next = NULL;
	mp->mnt_list.tqe_prev = NULL;
	mount_list_unlock();
}
mount_t
mount_lookupby_volfsid(int volfs_id, int withref)
{
	mount_t cur_mount = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
		    (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
		    (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
			cur_mount = mp;
			if (withref) {
				if (mount_iterref(cur_mount, 1)) {
					cur_mount = (mount_t)0;
					mount_list_unlock();
					goto out;
				}
			}
			break;
		}
	}
	mount_list_unlock();
	if (withref && (cur_mount != (mount_t)0)) {
		mp = cur_mount;
		if (vfs_busy(mp, LK_NOWAIT) != 0) {
			cur_mount = (mount_t)0;
		}
		mount_iterdrop(mp);
	}
out:
	return (cur_mount);
}

mount_t
mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
{
	mount_t retmp = (mount_t)0;
	mount_t mp;

	if (!locked)
		mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list)
		if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
			retmp = mp;
			if (withref) {
				if (mount_iterref(retmp, 1))
					retmp = (mount_t)0;
			}
			goto out;
		}
out:
	if (!locked)
		mount_list_unlock();
	return (retmp);
}
errno_t
vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	struct nameidata nd;
	int error;
	u_int32_t ndflags = 0;

	if (ctx == NULL) {		/* XXX technically an error */
		ctx = vfs_context_current();
	}

	if (flags & VNODE_LOOKUP_NOFOLLOW)
		ndflags = NOFOLLOW;
	else
		ndflags = FOLLOW;

	if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
		ndflags |= NOCROSSMOUNT;

	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(&nd, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
	       CAST_USER_ADDR_T(path), ctx);

	if ((error = namei(&nd)))
		return (error);
	*vpp = nd.ni_vp;
	nameidone(&nd);

	return (0);
}
errno_t
vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	struct nameidata nd;
	int error;
	u_int32_t ndflags = 0;
	int lflags = flags;

	if (ctx == NULL) {		/* XXX technically an error */
		ctx = vfs_context_current();
	}

	if (fmode & O_NOFOLLOW)
		lflags |= VNODE_LOOKUP_NOFOLLOW;

	if (lflags & VNODE_LOOKUP_NOFOLLOW)
		ndflags = NOFOLLOW;
	else
		ndflags = FOLLOW;

	if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
		ndflags |= NOCROSSMOUNT;

	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	if ((error = vn_open(&nd, fmode, cmode)))
		*vpp = NULL;
	else
		*vpp = nd.ni_vp;

	return (error);
}
int
vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
{
	int error;

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

	error = vn_close(vp, flags, ctx);
	vnode_put(vp);
	return (error);
}
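/*
 * Illustrative pairing of the two KPIs above (sketch only; the path, mode and
 * flag choices are hypothetical):
 *
 *	vnode_t vp;
 *	if (vnode_open("/some/path", FREAD, 0, 0, &vp, vfs_context_current()) == 0) {
 *		... read from vp ...
 *		vnode_close(vp, FREAD, vfs_context_current());
 *	}
 */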
int
vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
{
	struct vnode_attr	va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_modify_time);
	error = vnode_getattr(vp, &va, ctx);
	if (!error)
		*mtime = va.va_modify_time;
	return error;
}
int
vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
{
	struct vnode_attr	va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_flags);
	error = vnode_getattr(vp, &va, ctx);
	if (!error)
		*flags = va.va_flags;
	return error;
}
/*
 * Returns:	0			Success
 */
errno_t
vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
{
	struct vnode_attr	va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	error = vnode_getattr(vp, &va, ctx);
	if (!error)
		*sizep = va.va_data_size;
	return (error);
}
errno_t
vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
{
	struct vnode_attr	va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_data_size, size);
	va.va_vaflags = ioflag & 0xffff;
	return (vnode_setattr(vp, &va, ctx));
}
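/*
 * Note that only the low 16 bits of 'ioflag' are carried through in
 * va_vaflags above; any higher-order bits are masked off before the setattr.
 */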
int
vnode_setdirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISDIRTY;
	vnode_unlock(vp);
	return 0;
}

int
vnode_cleardirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VISDIRTY;
	vnode_unlock(vp);
	return 0;
}

int
vnode_isdirty(vnode_t vp)
{
	int dirty;

	vnode_lock_spin(vp);
	dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
	vnode_unlock(vp);

	return dirty;
}
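/*
 * The three helpers above only toggle or test the VISDIRTY bit under the
 * vnode spinlock; they do not themselves initiate any writeback.
 */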
static int
vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap,
    uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
{
	/* Only use compound VNOP for compound operation */
	if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
		return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
	} else {
		return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
	}
}
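/*
 * The compound path above lets a filesystem that supports it perform the
 * create and the open as a single VNOP; otherwise the create falls back to
 * the classic VNOP_CREATE and any open is handled separately by the caller.
 */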
/*
 * Create a filesystem object of arbitrary type with arbitrary attributes in
 * the specified directory with the specified name.
 *
 * Parameters:	dvp			Pointer to the vnode of the directory
 *					in which to create the object.
 *		vpp			Pointer to the area into which to
 *					return the vnode of the created object.
 *		cnp			Component name pointer from the namei
 *					data structure, containing the name to
 *					use for the create object.
 *		vap			Pointer to the vnode_attr structure
 *					describing the object to be created,
 *					including the type of object.
 *		flags			VN_* flags controlling ACL inheritance
 *					and whether or not authorization is to
 *					be required for the operation.
 *
 * Returns:	0			Success
 *
 * Implicit:	*vpp			Contains the vnode of the object that
 *					was created, if successful.
 *		*cnp			May be modified by the underlying VFS.
 *		*vap			May be modified by the underlying VFS,
 *					modified by either ACL inheritance or
 *					defaulting, and may be modified even if
 *					the operation is unsuccessful.
 *
 * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *		Modification of '*cnp' and '*vap' by the underlying VFS is
 *		strongly discouraged.
 *
 * XXX:		This function is a 'vn_*' function; it belongs in vfs_vnops.c
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
errno_t
vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap,
    uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
{
	errno_t	error, old_error;
	vnode_t vp = (vnode_t)0;
	boolean_t batched;
	struct componentname *cnp;
	uint32_t defaulted;
	uint32_t dfflags;	// Directory file flags

	cnp = &ndp->ni_cnd;
	batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;

	KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);

	if (flags & VN_CREATE_NOINHERIT)
		vap->va_vaflags |= VA_NOINHERIT;
	if (flags & VN_CREATE_NOAUTH)
		vap->va_vaflags |= VA_NOAUTH;

	/*
	 * Handle ACL inheritance, initialize vap.
	 */
	error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
	if (error)
		return (error);

	if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
		panic("Open parameters, but not a regular file.");
	}
	if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
		panic("Mode for open, but not trying to open...");
	}

	/*
	 * Handle inheritance of restricted flag
	 */
	error = vnode_flags(dvp, &dfflags, ctx);
	if (error)
		goto out;
	if (dfflags & SF_RESTRICTED)
		VATTR_SET(vap, va_flags, SF_RESTRICTED);

	/*
	 * Create the requested node.
	 */
	switch (vap->va_type) {
	case VREG:
		error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
		break;
	case VDIR:
		error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
		break;
	case VSOCK:
	case VFIFO:
	case VBLK:
	case VCHR:
		error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
		break;
	default:
		panic("vnode_create: unknown vtype %d", vap->va_type);
	}
	if (error != 0) {
		KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
		goto out;
	}

	vp = *vpp;
	old_error = error;

#if CONFIG_MACF
	if (!(flags & VN_CREATE_NOLABEL)) {
		error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
		if (error)
			goto error;
	}
#endif

	/*
	 * If some of the requested attributes weren't handled by the VNOP,
	 * use our fallback code.
	 */
	if (!VATTR_ALL_SUPPORTED(vap) && *vpp) {
		KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
		error = vnode_setattr_fallback(*vpp, vap, ctx);
	}

#if CONFIG_MACF
error:
#endif
	if ((error != 0) && (vp != (vnode_t)0)) {

		/* If we've done a compound open, close */
		if (batched && (old_error == 0) && (vap->va_type == VREG)) {
			VNOP_CLOSE(vp, fmode, ctx);
		}

		/* Need to provide notifications if a create succeeded */
		if (!batched) {
			*vpp = (vnode_t) 0;
			vnode_put(vp);
		}
	}

out:
	vn_attribute_cleanup(vap, defaulted);

	return (error);
}
static kauth_scope_t	vnode_scope;
static int	vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
static int	vnode_authorize_callback_int(__unused kauth_cred_t credential, __unused void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);

typedef struct _vnode_authorize_context {
	vnode_t		vp;
	struct vnode_attr *vap;
	vnode_t		dvp;
	struct vnode_attr *dvap;
	vfs_context_t	ctx;
	int		flags;
	int		flags_valid;
#define	_VAC_IS_OWNER		(1<<0)
#define	_VAC_IN_GROUP		(1<<1)
#define	_VAC_IS_DIR_OWNER	(1<<2)
#define	_VAC_IN_DIR_GROUP	(1<<3)
} *vauth_ctx;
void
vnode_authorize_init(void)
{
	vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
}

#define VATTR_PREPARE_DEFAULTED_UID	0x1
#define VATTR_PREPARE_DEFAULTED_GID	0x2
#define VATTR_PREPARE_DEFAULTED_MODE	0x4
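/*
 * The VATTR_PREPARE_DEFAULTED_* bits record which of uid/gid/mode were filled
 * in by vn_attribute_prepare() rather than supplied by the caller, so that
 * vn_attribute_cleanup() can clear exactly those fields again afterwards.
 */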
static int
vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	kauth_acl_t nacl = NULL, oacl = NULL;
	int error;

	/*
	 * Handle ACL inheritance.
	 */
	if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
		/* save the original filesec */
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			oacl = vap->va_acl;
		}

		vap->va_acl = NULL;
		if ((error = kauth_acl_inherit(dvp,
			 oacl,
			 &nacl,
			 vap->va_type == VDIR,
			 ctx)) != 0) {
			KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
			return (error);
		}

		/*
		 * If the generated ACL is NULL, then we can save ourselves some effort
		 * by clearing the active bit.
		 */
		if (nacl == NULL) {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		} else {
			vap->va_base_acl = oacl;
			VATTR_SET(vap, va_acl, nacl);
		}
	}

	error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
	if (error) {
		vn_attribute_cleanup(vap, *defaulted_fieldsp);
	}

	return error;
}
static void
vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
{
	/*
	 * If the caller supplied a filesec in vap, it has been replaced
	 * now by the post-inheritance copy.  We need to put the original back
	 * and free the inherited product.
	 */
	kauth_acl_t nacl, oacl;

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		nacl = vap->va_acl;
		oacl = vap->va_base_acl;

		if (oacl) {
			VATTR_SET(vap, va_acl, oacl);
			vap->va_base_acl = NULL;
		} else {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		}

		if (nacl != NULL) {
			kauth_acl_free(nacl);
		}
	}

	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_mode);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}

	return;
}
int
vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
{
	int error;

	/*
	 * Normally, unlinking of directories is not supported.
	 * However, some file systems may have limited support.
	 */
	if ((vp->v_type == VDIR) &&
	    !(vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSDIRLINKS)) {
		return (EPERM);	/* POSIX */
	}

	/* authorize the delete operation */
	error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
	if (!error)
		error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);

	return (error);
}
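/*
 * Both the MAC unlink check and the kauth DELETE authorization above have to
 * succeed; the first failure short-circuits the second.
 */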
int
vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
{
	/* Open of existing case */
	kauth_action_t action;
	int error;

	if (cnp->cn_ndp == NULL) {
		panic("NULL ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

	/* XXX may do duplicate work here, but ignore that for now (idempotent) */
	if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
		error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
		if (error)
			return (error);
	}

	if ( (fmode & O_DIRECTORY) && vp->v_type != VDIR ) {
		return (ENOTDIR);
	}

	if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
		return (EOPNOTSUPP);	/* Operation not supported on socket */
	}

	if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
		return (ELOOP);	/* O_NOFOLLOW was specified and the target is a symbolic link */
	}

	/* disallow write operations on directories */
	if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
		return (EISDIR);
	}

	if ((cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH)) {
		if (vp->v_type != VDIR) {
			return (ENOTDIR);
		}
	}

	/* If a file being opened is a shadow file containing
	 * namedstream data, ignore the macf checks because it
	 * is a kernel internal file and access should always
	 * be allowed.
	 */
	if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
		error = mac_vnode_check_open(ctx, vp, fmode);
		if (error) {
			return (error);
		}
	}

	/* compute action to be authorized */
	action = 0;
	if (fmode & FREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (fmode & (FWRITE | O_TRUNC)) {
		/*
		 * If we are writing, appending, and not truncating,
		 * indicate that we are appending so that if the
		 * UF_APPEND or SF_APPEND bits are set, we do not deny
		 * the open.
		 */
		if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
			action |= KAUTH_VNODE_APPEND_DATA;
		} else {
			action |= KAUTH_VNODE_WRITE_DATA;
		}
	}
	error = vnode_authorize(vp, NULL, action, ctx);

	if (error == EACCES) {
		/*
		 * Shadow files may exist on-disk with a different UID/GID
		 * than that of the current context.  Verify that this file
		 * is really a shadow file.  If it was created successfully
		 * then it should be authorized.
		 */
		if (vnode_isshadow(vp) && vnode_isnamedstream (vp)) {
			error = vnode_verifynamedstream(vp);
		}
	}

	return error;
}
int
vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
	int error;

	if (cnp->cn_ndp == NULL) {
		panic("NULL cn_ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

	/* Only validate path for creation if we didn't do a complete lookup */
	if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
		if (error)
			return (error);
	}

#if CONFIG_MACF
	error = mac_vnode_check_create(ctx, dvp, cnp, vap);
	if (error)
		return (error);
#endif /* CONFIG_MACF */

	return (vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx));
}
int
vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
    vfs_context_t ctx, void *reserved)
{
	int error = 0;
	int moving = 0;

	if (reserved != NULL) {
		panic("Passed something other than NULL as reserved field!");
	}

	/*
	 * Avoid renaming "." and "..".
	 *
	 * XXX No need to check for this in the FS.  We should always have the leaves
	 * in VFS in this case.
	 */
	if (fvp->v_type == VDIR &&
	    ((fdvp == fvp) ||
	     (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
	     ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT)) ) {
		error = EINVAL;
		goto out;
	}

	if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
		error = lookup_validate_creation_path(tcnp->cn_ndp);
		if (error)
			goto out;
	}

	/***** <MACF> *****/
	error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
	if (error)
		goto out;
	/***** </MACF> *****/

	/***** <MiscChecks> *****/
	if (tvp != NULL) {
		if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
			error = ENOTDIR;
			goto out;
		} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
			error = EISDIR;
			goto out;
		}
	}

	/*
	 * The following edge case is caught here:
	 * (to cannot be a descendent of from)
	 */
	if (tdvp->v_parent == fvp) {
		error = EINVAL;
		goto out;
	}
	/***** </MiscChecks> *****/

	/***** <Kauth> *****/

	if ((tvp != NULL) && vnode_isdir(tvp)) {
		if (tvp != fdvp)
			moving = 1;
	} else if (tdvp != fdvp) {
		moving = 1;
	}

	/*
	 * must have delete rights to remove the old name even in
	 * the simple case of fdvp == tdvp.
	 *
	 * If fvp is a directory, and we are changing its parent,
	 * then we also need rights to rewrite its ".." entry as well.
	 */
	if (vnode_isdir(fvp)) {
		if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0)
			goto out;
	} else {
		if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0)
			goto out;
	}
	if (moving) {
		/* moving into tdvp or tvp, must have rights to add */
		if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
		    NULL,
		    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
		    ctx)) != 0) {
			goto out;
		}
	} else {
		/* node staying in same directory, must be allowed to add new name */
		if ((error = vnode_authorize(fdvp, NULL,
		    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0)
			goto out;
	}
	/* overwriting tvp */
	if ((tvp != NULL) && !vnode_isdir(tvp) &&
	    ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
		goto out;
	}

	/***** </Kauth> *****/

	/* XXX more checks? */
out:
	return error;
}
int
vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
	int error = 0;

	if (reserved != NULL) {
		panic("reserved not NULL in vn_authorize_mkdir()");
	}

	/* XXX A hack for now, to make shadow files work */
	if (cnp->cn_ndp == NULL) {
		return 0;
	}

	if (vnode_compound_mkdir_available(dvp)) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
		if (error)
			goto out;
	}

	error = mac_vnode_check_create(ctx,
	    dvp, cnp, vap);
	if (error)
		goto out;

	/* authorize addition of a directory to the parent */
	if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0)
		goto out;

out:
	return error;
}
int
vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
{
	int error;

	if (reserved != NULL) {
		panic("Non-NULL reserved argument to vn_authorize_rmdir()");
	}

	if (vp->v_type != VDIR) {
		/*
		 * rmdir only deals with directories
		 */
		return ENOTDIR;
	}

	if (dvp == vp) {
		/*
		 * No rmdir "." please.
		 */
		return EINVAL;
	}

	error = mac_vnode_check_unlink(ctx, dvp,
	    vp, cnp);
	if (error)
		return error;

	return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
}
/*
 * Authorize an operation on a vnode.
 *
 * This is KPI, but here because it needs vnode_scope.
 *
 * Returns:	0			Success
 *	kauth_authorize_action:EPERM	...
 *	xlate => EACCES			Permission denied
 *	kauth_authorize_action:0	Success
 *	kauth_authorize_action:		Depends on callback return; this is
 *					usually only vnode_authorize_callback(),
 *					but may include other listeners, if any
 *					exist.
 */
int
vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
{
	int	error, result;

	/*
	 * We can't authorize against a dead vnode; allow all operations through so that
	 * the correct error can be returned.
	 */
	if (vp->v_type == VBAD)
		return (0);

	error = 0;
	result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
	    (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
	if (result == EPERM)		/* traditional behaviour */
		result = EACCES;
	/* did the lower layers give a better error return? */
	if ((result != 0) && (error != 0))
		return (error);
	return (result);
}
/*
 * Test for vnode immutability.
 *
 * The 'append' flag is set when the authorization request is constrained
 * to operations which only request the right to append to a file.
 *
 * The 'ignore' flag is set when an operation modifying the immutability flags
 * is being authorized.  We check the system securelevel to determine which
 * immutability flags we can ignore.
 */
static int
vnode_immutable(struct vnode_attr *vap, int append, int ignore)
{
	int	mask;

	/* start with all bits precluding the operation */
	mask = IMMUTABLE | APPEND;

	/* if appending only, remove the append-only bits */
	if (append)
		mask &= ~APPEND;

	/* ignore only set when authorizing flags changes */
	if (ignore) {
		if (securelevel <= 0) {
			/* in insecure state, flags do not inhibit changes */
			mask = 0;
		} else {
			/* in secure state, user flags don't inhibit */
			mask &= ~(UF_IMMUTABLE | UF_APPEND);
		}
	}
	KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
	if ((vap->va_flags & mask) != 0)
		return (EPERM);
	return (0);
}
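/*
 * For example, when a flags change is being authorized ('ignore' set) and the
 * running securelevel is 0 or lower, the mask above collapses to zero, so
 * existing immutable/append bits do not block the change; at higher
 * securelevels only the user flags are exempted.
 */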
static int
vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
{
	int result;

	/* default assumption is not-owner */
	result = 0;

	/*
	 * If the filesystem has given us a UID, we treat this as authoritative.
	 */
	if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
		result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
	}
	/* we could test the owner UUID here if we had a policy for it */

	return (result);
}
/*
 * vauth_node_group
 *
 * Description:	Ask if a cred is a member of the group owning the vnode object
 *
 * Parameters:		vap		vnode attribute
 *			vap->va_gid	group owner of vnode object
 *			cred		credential to check
 *			ismember	pointer to where to put the answer
 *			idontknow	Return this if we can't get an answer
 *
 * Returns:		0		Success
 *			idontknow	Can't get information
 *	kauth_cred_ismember_gid:?	Error from kauth subsystem
 *	kauth_cred_ismember_gid:?	Error from kauth subsystem
 */
static int
vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
{
	int	error;
	int	result;

	error = 0;
	result = 0;

	/*
	 * The caller is expected to have asked the filesystem for a group
	 * at some point prior to calling this function.  The answer may
	 * have been that there is no group ownership supported for the
	 * vnode object, in which case we report that the credential is not
	 * a member.
	 */
	if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
		error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
		/*
		 * Credentials which are opted into external group membership
		 * resolution which are not known to the external resolver
		 * will result in an ENOENT error.  We translate this into
		 * the appropriate 'idontknow' response for our caller.
		 *
		 * XXX We do not make a distinction here between an ENOENT
		 * XXX arising from a response from the external resolver,
		 * XXX and an ENOENT which is internally generated.  This is
		 * XXX a deficiency of the published kauth_cred_ismember_gid()
		 * XXX KPI which can not be overcome without new KPI.  For
		 * XXX all currently known cases, however, this will result
		 * XXX in correct behaviour.
		 */
		if (error == ENOENT)
			error = idontknow;
	}
	/*
	 * XXX We could test the group UUID here if we had a policy for it,
	 * XXX but this is problematic from the perspective of synchronizing
	 * XXX group UUID and POSIX GID ownership of a file and keeping the
	 * XXX values coherent over time.  The problem is that the local
	 * XXX system will vend transient group UUIDs for unknown POSIX GID
	 * XXX values, and these are not persistent, whereas storage of values
	 * XXX is persistent.  One potential solution to this is a local
	 * XXX (persistent) replica of remote directory entries and vended
	 * XXX local ids in a local directory server (think in terms of a
	 * XXX caching DNS server).
	 */

	if (!error)
		*ismember = result;
	return (error);
}
static int
vauth_file_owner(vauth_ctx vcp)
{
	int result;

	if (vcp->flags_valid & _VAC_IS_OWNER) {
		result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
	} else {
		result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);

		/* cache our result */
		vcp->flags_valid |= _VAC_IS_OWNER;
		if (result) {
			vcp->flags |= _VAC_IS_OWNER;
		} else {
			vcp->flags &= ~_VAC_IS_OWNER;
		}
	}
	return (result);
}
/*
 * vauth_file_ingroup
 *
 * Description:	Ask if a user is a member of the group owning the file
 *
 * Parameters:		vcp		The vnode authorization context that
 *					contains the user and directory info
 *				vcp->flags_valid	Valid flags
 *				vcp->flags		Flags values
 *				vcp->vap		File vnode attributes
 *				vcp->ctx		VFS Context (for user)
 *			ismember	pointer to where to put the answer
 *			idontknow	Return this if we can't get an answer
 *
 * Returns:		0		Success
 *		vauth_node_group:?	Error from vauth_node_group()
 *
 * Implicit returns:	*ismember	0	The user is not a group member
 *					1	The user is a group member
 */
static int
vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
{
	int	error;

	/* Check for a cached answer first, to avoid the check if possible */
	if (vcp->flags_valid & _VAC_IN_GROUP) {
		*ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
		error = 0;
	} else {
		/* Otherwise, go look for it */
		error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);

		if (!error) {
			/* cache our result */
			vcp->flags_valid |= _VAC_IN_GROUP;
			if (*ismember) {
				vcp->flags |= _VAC_IN_GROUP;
			} else {
				vcp->flags &= ~_VAC_IN_GROUP;
			}
		}
	}
	return (error);
}
static int
vauth_dir_owner(vauth_ctx vcp)
{
	int result;

	if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
		result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
	} else {
		result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);

		/* cache our result */
		vcp->flags_valid |= _VAC_IS_DIR_OWNER;
		if (result) {
			vcp->flags |= _VAC_IS_DIR_OWNER;
		} else {
			vcp->flags &= ~_VAC_IS_DIR_OWNER;
		}
	}
	return (result);
}
/*
 * vauth_dir_ingroup
 *
 * Description:	Ask if a user is a member of the group owning the directory
 *
 * Parameters:		vcp		The vnode authorization context that
 *					contains the user and directory info
 *				vcp->flags_valid	Valid flags
 *				vcp->flags		Flags values
 *				vcp->dvap		Dir vnode attributes
 *				vcp->ctx		VFS Context (for user)
 *			ismember	pointer to where to put the answer
 *			idontknow	Return this if we can't get an answer
 *
 * Returns:		0		Success
 *		vauth_node_group:?	Error from vauth_node_group()
 *
 * Implicit returns:	*ismember	0	The user is not a group member
 *					1	The user is a group member
 */
static int
vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
{
	int	error;

	/* Check for a cached answer first, to avoid the check if possible */
	if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
		*ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
		error = 0;
	} else {
		/* Otherwise, go look for it */
		error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);

		if (!error) {
			/* cache our result */
			vcp->flags_valid |= _VAC_IN_DIR_GROUP;
			if (*ismember) {
				vcp->flags |= _VAC_IN_DIR_GROUP;
			} else {
				vcp->flags &= ~_VAC_IN_DIR_GROUP;
			}
		}
	}
	return (error);
}
/*
 * Test the posix permissions in (vap) to determine whether (credential)
 * may perform (action)
 */
static int
vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
{
	struct vnode_attr *vap;
	int needed, error, owner_ok, group_ok, world_ok, ismember;
#ifdef KAUTH_DEBUG_ENABLE
	const char *where = "uninitialized";
# define _SETWHERE(c)	where = c;
#else
# define _SETWHERE(c)
#endif

	/* checking file or directory? */
	if (on_dir) {
		vap = vcp->dvap;
	} else {
		vap = vcp->vap;
	}

	error = 0;

	/*
	 * We want to do as little work here as possible.  So first we check
	 * which sets of permissions grant us the access we need, and avoid checking
	 * whether specific permissions grant access when more generic ones would.
	 */

	/* owner permissions */
	needed = 0;
	if (action & VREAD)
		needed |= S_IRUSR;
	if (action & VWRITE)
		needed |= S_IWUSR;
	if (action & VEXEC)
		needed |= S_IXUSR;
	owner_ok = (needed & vap->va_mode) == needed;

	/* group permissions */
	needed = 0;
	if (action & VREAD)
		needed |= S_IRGRP;
	if (action & VWRITE)
		needed |= S_IWGRP;
	if (action & VEXEC)
		needed |= S_IXGRP;
	group_ok = (needed & vap->va_mode) == needed;

	/* world permissions */
	needed = 0;
	if (action & VREAD)
		needed |= S_IROTH;
	if (action & VWRITE)
		needed |= S_IWOTH;
	if (action & VEXEC)
		needed |= S_IXOTH;
	world_ok = (needed & vap->va_mode) == needed;

	/* If granted/denied by all three, we're done */
	if (owner_ok && group_ok && world_ok) {
		_SETWHERE("all");
		goto out;
	}
	if (!owner_ok && !group_ok && !world_ok) {
		_SETWHERE("all");
		error = EACCES;
		goto out;
	}

	/* Check ownership (relatively cheap) */
	if ((on_dir && vauth_dir_owner(vcp)) ||
	    (!on_dir && vauth_file_owner(vcp))) {
		_SETWHERE("user");
		if (!owner_ok)
			error = EACCES;
		goto out;
	}

	/* Not owner; if group and world both grant it we're done */
	if (group_ok && world_ok) {
		_SETWHERE("group/world");
		goto out;
	}
	if (!group_ok && !world_ok) {
		_SETWHERE("group/world");
		error = EACCES;
		goto out;
	}

	/* Check group membership (most expensive) */
	ismember = 0;	/* Default to allow, if the target has no group owner */

	/*
	 * In the case we can't get an answer about the user from the call to
	 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
	 * the side of caution, rather than simply granting access, or we will
	 * fail to correctly implement exclusion groups, so we set the third
	 * parameter on the basis of the state of 'group_ok'.
	 */
	if (on_dir) {
		error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	} else {
		error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	}
	if (error) {
		if (!group_ok)
			ismember = 1;
		error = 0;
	}
	if (ismember) {
		_SETWHERE("group");
		if (!group_ok)
			error = EACCES;
		goto out;
	}

	/* Not owner, not in group, use world result */
	_SETWHERE("world");
	if (!world_ok)
		error = EACCES;

	/* FALLTHROUGH */

out:
	KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
	    vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
	    (action & VREAD)  ? "r" : "-",
	    (action & VWRITE) ? "w" : "-",
	    (action & VEXEC)  ? "x" : "-",
	    needed,
	    (vap->va_mode & S_IRUSR) ? "r" : "-",
	    (vap->va_mode & S_IWUSR) ? "w" : "-",
	    (vap->va_mode & S_IXUSR) ? "x" : "-",
	    (vap->va_mode & S_IRGRP) ? "r" : "-",
	    (vap->va_mode & S_IWGRP) ? "w" : "-",
	    (vap->va_mode & S_IXGRP) ? "x" : "-",
	    (vap->va_mode & S_IROTH) ? "r" : "-",
	    (vap->va_mode & S_IWOTH) ? "w" : "-",
	    (vap->va_mode & S_IXOTH) ? "x" : "-",
	    kauth_cred_getuid(vcp->ctx->vc_ucred),
	    on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
	    on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
	return (error);
}
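/*
 * Worked example of the short-circuit logic above: for a mode 0740 file and a
 * plain VREAD request, the owner and group classes grant read but the world
 * class does not, so the result is not decided up front.  The cheap ownership
 * test is consulted first; only when the caller is not the owner does the
 * expensive group-membership lookup run, and the world bits decide the rest.
 */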
/*
 * Authorize the deletion of the node vp from the directory dvp.
 *
 * We assume that:
 * - Neither the node nor the directory are immutable.
 * - The user is not the superuser.
 *
 * Deletion is not permitted if the directory is sticky and the caller is
 * not owner of the node or directory.
 *
 * If either the node grants DELETE, or the directory grants DELETE_CHILD,
 * the node may be deleted.  If neither denies the permission, and the
 * caller has Posix write access to the directory, then the node may be
 * deleted.
 *
 * As an optimization, we cache whether or not delete child is permitted
 * on directories without the sticky bit set.
 */
static int
vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child);
static int
vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
{
	struct vnode_attr	*vap = vcp->vap;
	struct vnode_attr	*dvap = vcp->dvap;
	kauth_cred_t		cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval	eval;
	int			error, delete_denied, delete_child_denied, ismember;

	/* check the ACL on the directory */
	delete_child_denied = 0;
	if (!cached_delete_child && VATTR_IS_NOT(dvap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
		eval.ae_acl = &dvap->va_acl->acl_ace[0];
		eval.ae_count = dvap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_dir_owner(vcp))
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT)
			return (error);
		if (error == ENOENT)
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		else if (ismember)
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		/*
		 * If there is no entry, we are going to defer to other
		 * authorization mechanisms.
		 */
		error = kauth_acl_evaluate(cred, &eval);

		if (error != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return (error);
		}
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			delete_child_denied = 1;
			break;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
			return (0);
		case KAUTH_RESULT_DEFER:
		default:
			/* Effectively the same as !delete_child_denied */
			KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
			break;
		}
	}

	/* check the ACL on the node */
	delete_denied = 0;
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp))
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT)
			return (error);
		if (error == ENOENT)
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		else if (ismember)
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return (error);
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			delete_denied = 1;
			break;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
			return (0);
		case KAUTH_RESULT_DEFER:
		default:
			/* Effectively the same as !delete_child_denied */
			KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : "");
			break;
		}
	}

	/* if denied by ACL on directory or node, return denial */
	if (delete_denied || delete_child_denied) {
		KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
		return (EACCES);
	}

	/* enforce sticky bit behaviour */
	if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
		KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d  file %d  dir %d)",
		    vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
		return (EACCES);
	}

	/* check the directory */
	if (!cached_delete_child && (error = vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */)) != 0) {
		KAUTH_DEBUG("%p DENIED - denied by posix permissions", vcp->vp);
		return (error);
	}

	/* not denied, must be OK */
	return (0);
}
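/*
 * When 'cached_delete_child' is TRUE the directory-side ACL and posix checks
 * above are skipped, because the caller has already authorized and cached
 * DELETE_CHILD on the parent directory; only the node-specific checks still
 * have to run for this particular child.
 */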
/*
 * Authorize an operation based on the node's attributes.
 */
static int
vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
{
	struct vnode_attr	*vap = vcp->vap;
	kauth_cred_t		cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval	eval;
	int			error, ismember;
	mode_t			posix_action;

	/*
	 * If we are the file owner, we automatically have some rights.
	 *
	 * Do we need to expand this to support group ownership?
	 */
	if (vauth_file_owner(vcp))
		acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);

	/*
	 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
	 * mask the latter.  If TAKE_OWNERSHIP is requested the caller is about to
	 * change ownership to themselves, and WRITE_SECURITY is implicitly
	 * granted to the owner.  We need to do this because at this point
	 * WRITE_SECURITY may not be granted as the caller is not currently
	 * the owner.
	 */
	if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
	    (acl_rights & KAUTH_VNODE_WRITE_SECURITY))
		acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;

	if (acl_rights == 0) {
		KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
		return (0);
	}

	/* if we have an ACL, evaluate it */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = acl_rights;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp))
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT)
			return (error);
		if (error == ENOENT)
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		else if (ismember)
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return (error);
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
			return (EACCES);	/* deny, deny, counter-allege */
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
			return (0);
		case KAUTH_RESULT_DEFER:
		default:
			/* Effectively the same as !delete_child_denied */
			KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
			break;
		}

		*found_deny = eval.ae_found_deny;

		/* fall through and evaluate residual rights */
	} else {
		/* no ACL, everything is residual */
		eval.ae_residual = acl_rights;
	}

	/*
	 * Grant residual rights that have been pre-authorized.
	 */
	eval.ae_residual &= ~preauth_rights;

	/*
	 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
	 */
	if (vauth_file_owner(vcp))
		eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;

	if (eval.ae_residual == 0) {
		KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
		return (0);
	}

	/*
	 * Bail if we have residual rights that can't be granted by posix permissions,
	 * or aren't presumed granted at this point.
	 *
	 * XXX these can be collapsed for performance
	 */
	if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
		KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
		return (EACCES);
	}
	if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
		KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
		return (EACCES);
	}

	if (eval.ae_residual & KAUTH_VNODE_DELETE)
		panic("vnode_authorize: can't be checking delete permission here");

	/*
	 * Compute the fallback posix permissions that will satisfy the remaining
	 * rights.
	 */
	posix_action = 0;
	if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
	    KAUTH_VNODE_LIST_DIRECTORY |
	    KAUTH_VNODE_READ_EXTATTRIBUTES))
		posix_action |= VREAD;
	if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
	    KAUTH_VNODE_ADD_FILE |
	    KAUTH_VNODE_ADD_SUBDIRECTORY |
	    KAUTH_VNODE_DELETE_CHILD |
	    KAUTH_VNODE_WRITE_ATTRIBUTES |
	    KAUTH_VNODE_WRITE_EXTATTRIBUTES))
		posix_action |= VWRITE;
	if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
	    KAUTH_VNODE_SEARCH))
		posix_action |= VEXEC;

	if (posix_action != 0) {
		return (vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */));
	} else {
		KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
		    vcp->vp,
		    (eval.ae_residual & KAUTH_VNODE_READ_DATA)
		    ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_EXECUTE)
		    ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE)
		    ? " DELETE" : "",
		    (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
		    ? " DELETE_CHILD" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
		    ? " READ_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
		    ? " WRITE_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
		    ? " READ_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
		    ? " WRITE_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
		    ? " READ_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
		    ? " WRITE_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
		    ? " CHECKIMMUTABLE" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
		    ? " CHANGE_OWNER" : "");
	}

	/*
	 * Lack of required Posix permissions implies no reason to deny access.
	 */
	return (0);
}
/*
 * Check for file immutability.
 */
static int
vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
{
	mount_t mp;
	int error;
	int append;

	/*
	 * Perform immutability checks for operations that change data.
	 *
	 * Sockets, fifos and devices require special handling.
	 */
	switch (vp->v_type) {
	case VSOCK:
	case VFIFO:
	case VBLK:
	case VCHR:
		/*
		 * Writing to these nodes does not change the filesystem data,
		 * so forget that it's being tried.
		 */
		rights &= ~KAUTH_VNODE_WRITE_DATA;
		break;
	default:
		break;
	}

	error = 0;
	if (rights & KAUTH_VNODE_WRITE_RIGHTS) {

		/* check per-filesystem options if possible */
		mp = vp->v_mount;
		if (mp != NULL) {

			/* check for no-EA filesystems */
			if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
			    (vfs_flags(mp) & MNT_NOUSERXATTR)) {
				KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
				error = EACCES;  /* User attributes disabled */
				goto out;
			}
		}

		/*
		 * check for file immutability. first, check if the requested rights are
		 * allowable for a UF_APPEND file.
		 */
		append = 0;
		if (vp->v_type == VDIR) {
			if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
				append = 1;
		} else {
			if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights)
				append = 1;
		}
		if ((error = vnode_immutable(vap, append, ignore)) != 0) {
			KAUTH_DEBUG("%p DENIED - file is immutable", vp);
			goto out;
		}
	}
out:
	return (error);
}
/*
 * Handle authorization actions for filesystems that advertise that the
 * server will be enforcing.
 *
 * Returns:	0	Authorization should be handled locally
 *		1	Authorization was handled by the FS
 *
 * Note:	Imputed returns will only occur if the authorization request
 *		was handled by the FS.
 *
 * Imputed:	*resultp, modified	Return code from FS when the request is
 *					handled by the FS.
 */
static int
vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
{
	int	error;

	/*
	 * If the vp is a device node, socket or FIFO it actually represents a local
	 * endpoint, so we need to handle it locally.
	 */
	switch (vp->v_type) {
	case VBLK:
	case VCHR:
	case VSOCK:
	case VFIFO:
		return (0);
	default:
		break;
	}

	/*
	 * In the advisory request case, if the filesystem doesn't think it's reliable
	 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
	 */
	if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
		return (0);

	/*
	 * Let the filesystem have a say in the matter.  It's OK for it to not implement
	 * VNOP_ACCESS, as most will authorise inline with the actual request.
	 */
	if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
		*resultp = error;
		KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
		return (1);
	}

	/*
	 * Typically opaque filesystems do authorisation in-line, but exec is a special case.  In
	 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
	 */
	if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
		/* try a VNOP_OPEN for readonly access */
		if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
			*resultp = error;
			KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
			return (1);
		}
		VNOP_CLOSE(vp, FREAD, ctx);
	}

	/*
	 * We don't have any reason to believe that the request has to be denied at this point,
	 * so go ahead and allow it.
	 */
	*resultp = 0;
	KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
	return (1);
}
/*
 * Returns:	KAUTH_RESULT_ALLOW
 *		KAUTH_RESULT_DENY
 *
 * Imputed:	*arg3, modified		Error code in the deny case
 *		EROFS			Read-only file system
 *		EACCES			Permission denied
 *		EPERM			Operation not permitted [no execute]
 *	vnode_getattr:ENOMEM		Not enough space [only if has filesec]
 *	vnode_authorize_opaque:*arg2	???
 *	vnode_authorize_checkimmutable:???
 *	vnode_authorize_delete:???
 *	vnode_authorize_simple:???
 */
static int
vnode_authorize_callback(kauth_cred_t cred, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
	vfs_context_t	ctx;
	vnode_t		cvp = NULLVP;
	vnode_t		vp, dvp;
	int		result = KAUTH_RESULT_DENY;
	int		parent_iocount = 0;
	int		parent_action; /* In case we need to use namedstream's data fork for cached rights*/

	ctx = (vfs_context_t)arg0;
	vp = (vnode_t)arg1;
	dvp = (vnode_t)arg2;

	/*
	 * if there are 2 vnodes passed in, we don't know at
	 * this point which rights to look at based on the
	 * combined action being passed in... defer until later...
	 * otherwise check the kauth 'rights' cache hung
	 * off of the vnode we're interested in... if we've already
	 * been granted the right we're currently interested in,
	 * we can just return success... otherwise we'll go through
	 * the process of authorizing the requested right(s)... if that
	 * succeeds, we'll add the right(s) to the cache.
	 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
	 */
	if (dvp && vp)
		goto defer;
	if (dvp) {
		cvp = dvp;
	} else {
		/*
		 * For named streams on local-authorization volumes, rights are cached on the parent;
		 * authorization is determined by looking at the parent's properties anyway, so storing
		 * on the parent means that we don't recompute for the named stream and that if
		 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
		 * stream to flush its cache separately.  If we miss in the cache, then we authorize
		 * as if there were no cached rights (passing the named stream vnode and desired rights to
		 * vnode_authorize_callback_int()).
		 *
		 * On an opaquely authorized volume, we don't know the relationship between the
		 * data fork's properties and the rights granted on a stream.  Thus, named stream vnodes
		 * on such a volume are authorized directly (rather than using the parent) and have their
		 * own caches.  When a named stream vnode is created, we mark the parent as having a named
		 * stream.  On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
		 * find the stream and flush its cache.
		 */
		if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
			cvp = vnode_getparent(vp);
			if (cvp != NULLVP) {
				parent_iocount = 1;
			} else {
				cvp = NULLVP;
				goto defer; /* If we can't use the parent, take the slow path */
			}

			/* Have to translate some actions */
			parent_action = action;
			if (parent_action & KAUTH_VNODE_READ_DATA) {
				parent_action &= ~KAUTH_VNODE_READ_DATA;
				parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
			}
			if (parent_action & KAUTH_VNODE_WRITE_DATA) {
				parent_action &= ~KAUTH_VNODE_WRITE_DATA;
				parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
			}
		} else {
			cvp = vp;
		}
	}

	if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
		result = KAUTH_RESULT_ALLOW;
		goto out;
	}
defer:
	result = vnode_authorize_callback_int(cred, idata, action, arg0, arg1, arg2, arg3);

	if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
		KAUTH_DEBUG("%p - caching action = %x", cvp, action);
		vnode_cache_authorized_action(cvp, ctx, action);
	}

out:
	if (parent_iocount) {
		vnode_put(cvp);
	}

	return result;
}
static int
vnode_authorize_callback_int(__unused kauth_cred_t unused_cred, __unused void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx		vcp;
	vfs_context_t		ctx;
	vnode_t			vp, dvp;
	kauth_cred_t		cred;
	kauth_ace_rights_t	rights;
	struct vnode_attr	va, dva;
	int			result;
	int			*errorp;
	int			noimmutable;
	boolean_t		parent_authorized_for_delete_child = FALSE;
	boolean_t		found_deny = FALSE;
	boolean_t		parent_ref = FALSE;

	vcp = &auth_context;
	ctx = vcp->ctx = (vfs_context_t)arg0;
	vp = vcp->vp = (vnode_t)arg1;
	dvp = vcp->dvp = (vnode_t)arg2;
	errorp = (int *)arg3;
	/*
	 * Note that we authorize against the context, not the passed cred
	 * (the same thing anyway)
	 */
	cred = ctx->vc_ucred;

	VATTR_INIT(&va);
	vcp->vap = &va;
	VATTR_INIT(&dva);
	vcp->dvap = &dva;

	vcp->flags = vcp->flags_valid = 0;

	if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
		panic("vnode_authorize: bad arguments (context %p  vp %p  cred %p)", ctx, vp, cred);

	KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
	    vp, vfs_context_proc(ctx)->p_comm,
	    (action & KAUTH_VNODE_ACCESS)		? "access" : "auth",
	    (action & KAUTH_VNODE_READ_DATA)		? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
	    (action & KAUTH_VNODE_WRITE_DATA)		? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
	    (action & KAUTH_VNODE_EXECUTE)		? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
	    (action & KAUTH_VNODE_DELETE)		? " DELETE" : "",
	    (action & KAUTH_VNODE_APPEND_DATA)		? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
	    (action & KAUTH_VNODE_DELETE_CHILD)		? " DELETE_CHILD" : "",
	    (action & KAUTH_VNODE_READ_ATTRIBUTES)	? " READ_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_ATTRIBUTES)	? " WRITE_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_EXTATTRIBUTES)	? " READ_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES)	? " WRITE_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_SECURITY)	? " READ_SECURITY" : "",
	    (action & KAUTH_VNODE_WRITE_SECURITY)	? " WRITE_SECURITY" : "",
	    (action & KAUTH_VNODE_CHANGE_OWNER)		? " CHANGE_OWNER" : "",
	    (action & KAUTH_VNODE_NOIMMUTABLE)		? " (noimmutable)" : "",
	    vnode_isdir(vp) ? "directory" : "file",
	    vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);

	/*
	 * Extract the control bits from the action, everything else is
	 * requested rights.
	 */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	if (rights & KAUTH_VNODE_DELETE) {
		if (dvp == NULL)
			panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
		/*
		 * check to see if we've already authorized the parent
		 * directory for deletion of its children... if so, we
		 * can skip a whole bunch of work... we will still have to
		 * authorize that this specific child can be removed
		 */
		if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE)
			parent_authorized_for_delete_child = TRUE;
	} else {
		dvp = NULL;
	}

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
	     (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
	     (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	/*
	 * Handle cases related to filesystems with non-local enforcement.
	 * This call can return 0, in which case we will fall through to perform a
	 * check based on VNOP_GETATTR data.  Otherwise it returns 1 and sets
	 * an appropriate result, at which point we can return immediately.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
		goto out;

	/*
	 * Get vnode attributes and extended security information for the vnode
	 * and directory if required.
	 */
	VATTR_WANTED(&va, va_mode);
	VATTR_WANTED(&va, va_uid);
	VATTR_WANTED(&va, va_gid);
	VATTR_WANTED(&va, va_flags);
	VATTR_WANTED(&va, va_acl);
	if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
		KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
		goto out;
	}
	if (dvp && parent_authorized_for_delete_child == FALSE) {
		VATTR_WANTED(&dva, va_mode);
		VATTR_WANTED(&dva, va_uid);
		VATTR_WANTED(&dva, va_gid);
		VATTR_WANTED(&dva, va_flags);
		VATTR_WANTED(&dva, va_acl);
		if ((result = vnode_getattr(dvp, &dva, ctx)) != 0) {
			KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
			goto out;
		}
	}

	/*
	 * If the vnode is an extended attribute data vnode (eg. a resource fork), *_DATA becomes
	 * *_EXTATTRIBUTES.
	 */
	if (vnode_isnamedstream(vp)) {
		if (rights & KAUTH_VNODE_READ_DATA) {
			rights &= ~KAUTH_VNODE_READ_DATA;
			rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
		}
		if (rights & KAUTH_VNODE_WRITE_DATA) {
			rights &= ~KAUTH_VNODE_WRITE_DATA;
			rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
		}
	}

	/*
	 * Point 'vp' to the resource fork's parent for ACL checking
	 */
	if (vnode_isnamedstream(vp) &&
	    (vp->v_parent != NULL) &&
	    (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
		parent_ref = TRUE;
		vcp->vp = vp = vp->v_parent;
		if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
			kauth_acl_free(va.va_acl);
		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_mode);
		VATTR_WANTED(&va, va_uid);
		VATTR_WANTED(&va, va_gid);
		VATTR_WANTED(&va, va_flags);
		VATTR_WANTED(&va, va_acl);
		if ((result = vnode_getattr(vp, &va, ctx)) != 0)
			goto out;
	}

	/*
	 * Check for immutability.
	 *
	 * In the deletion case, parent directory immutability vetoes specific
	 * file rights.
	 */
	if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
		goto out;
	if ((rights & KAUTH_VNODE_DELETE) &&
	    parent_authorized_for_delete_child == FALSE &&
	    ((result = vnode_authorize_checkimmutable(dvp, &dva, KAUTH_VNODE_DELETE_CHILD, 0)) != 0))
		goto out;

	/*
	 * Clear rights that have been authorized by reaching this point, bail if nothing left to
	 * check.
	 */
	rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
	if (rights == 0)
		goto out;

	/*
	 * If we're not the superuser, authorize based on file properties;
	 * note that even if parent_authorized_for_delete_child is TRUE, we
	 * need to check on the node itself.
	 */
	if (!vfs_context_issuser(ctx)) {
		/* process delete rights */
		if ((rights & KAUTH_VNODE_DELETE) &&
		    ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0))
			goto out;

		/* process remaining rights */
		if ((rights & ~KAUTH_VNODE_DELETE) &&
		    (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, &found_deny)) != 0)
			goto out;
	} else {
		/*
		 * Execute is only granted to root if one of the x bits is set.  This check only
		 * makes sense if the posix mode bits are actually supported.
		 */
		if ((rights & KAUTH_VNODE_EXECUTE) &&
		    (vp->v_type == VREG) &&
		    VATTR_IS_SUPPORTED(&va, va_mode) &&
		    !(va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
			result = EPERM;
			KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
			goto out;
		}

		KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
	}
out:
	if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL))
		kauth_acl_free(va.va_acl);
	if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL))
		kauth_acl_free(dva.va_acl);

	if (result) {
		if (parent_ref)
			vnode_put(vp);
		*errorp = result;
		KAUTH_DEBUG("%p DENIED - auth denied", vp);
		return (KAUTH_RESULT_DENY);
	}
	if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
		/*
		 * if we were successfully granted the right to search this directory
		 * and there were NO ACL DENYs for search and the posix permissions also don't
		 * deny execute, we can synthesize a global right that allows anyone to
		 * traverse this directory during a pathname lookup without having to
		 * match the credential associated with this cache of rights.
		 */
		if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
		    ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
		     (S_IXUSR | S_IXGRP | S_IXOTH))) {
			vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
		}
	}
	if ((rights & KAUTH_VNODE_DELETE) && parent_authorized_for_delete_child == FALSE) {
		/*
		 * parent was successfully and newly authorized for content deletions
		 * add it to the cache, but only if it doesn't have the sticky
		 * bit set on it.  This same check is done earlier guarding
		 * fetching of dva, and if we jumped to out without having done
		 * this, we will have returned already because of a non-zero
		 * 'result' value.
		 */
		if (VATTR_IS_SUPPORTED(&dva, va_mode) &&
		    !(dva.va_mode & (S_ISVTX))) {
			/* OK to cache delete rights */
			KAUTH_DEBUG("%p - caching DELETE_CHILD rights", dvp);
			vnode_cache_authorized_action(dvp, ctx, KAUTH_VNODE_DELETE_CHILD);
		}
	}
	if (parent_ref)
		vnode_put(vp);
	/*
	 * Note that this implies that we will allow requests for no rights, as well as
	 * for rights that we do not recognise.  There should be none of these.
	 */
	KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
	return (KAUTH_RESULT_ALLOW);
}
int
vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
{
	return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
}
/*
 * Check that the attribute information in vattr can be legally applied to
 * a new file by the context.
 */
static int
vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	int		error;
	int		has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
	kauth_cred_t	cred;
	guid_t		changer;
	mount_t		dmp;

	error = 0;

	if (defaulted_fieldsp) {
		*defaulted_fieldsp = 0;
	}

	defaulted_owner = defaulted_group = defaulted_mode = 0;

	/*
	 * Require that the filesystem support extended security to apply any.
	 */
	if (!vfs_extendedsecurity(dvp->v_mount) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Default some fields.
	 */
	dmp = dvp->v_mount;

	/*
	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
	 * owner takes ownership of all new files.
	 */
	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
		VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
		defaulted_owner = 1;
	} else {
		if (!VATTR_IS_ACTIVE(vap, va_uid)) {
			/* default owner is current user */
			VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
			defaulted_owner = 1;
		}
	}

	/*
	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
	 * group takes ownership of all new files.
	 */
	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
		VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
		defaulted_group = 1;
	} else {
		if (!VATTR_IS_ACTIVE(vap, va_gid)) {
			/* default group comes from parent object, fallback to current user */
			struct vnode_attr dva;
			VATTR_INIT(&dva);
			VATTR_WANTED(&dva, va_gid);
			if ((error = vnode_getattr(dvp, &dva, ctx)) != 0)
				goto out;
			if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
				VATTR_SET(vap, va_gid, dva.va_gid);
			} else {
				VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
			}
			defaulted_group = 1;
		}
	}

	if (!VATTR_IS_ACTIVE(vap, va_flags))
		VATTR_SET(vap, va_flags, 0);

	/* default mode is everything, masked with current umask */
	if (!VATTR_IS_ACTIVE(vap, va_mode)) {
		VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd->fd_cmask);
		KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o", vap->va_mode, vfs_context_proc(ctx)->p_fd->fd_cmask);
		defaulted_mode = 1;
	}

	/* set timestamps to now */
	if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
		nanotime(&vap->va_create_time);
		VATTR_SET_ACTIVE(vap, va_create_time);
	}

	/*
	 * Check for attempts to set nonsensical fields.
	 */
	if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
		error = EINVAL;
		KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
		    vap->va_active & ~VNODE_ATTR_NEWOBJ);
		goto out;
	}

	/*
	 * Quickly check for the applicability of any enforcement here.
	 * Tests below maintain the integrity of the local security model.
	 */
	if (vfs_authopaque(dvp->v_mount))
		goto out;

	/*
	 * We need to know if the caller is the superuser, or if the work is
	 * otherwise already authorised.
	 */
	cred = vfs_context_ucred(ctx);
	if (noauth) {
		/* doing work for the kernel */
		has_priv_suser = 1;
	} else {
		has_priv_suser = vfs_context_issuser(ctx);
	}

	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		if (has_priv_suser) {
			if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
				error = EPERM;
				KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
				goto out;
			}
		} else {
			if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
				error = EPERM;
				KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
				goto out;
			}
		}
	}

	/* if not superuser, validate legality of new-item attributes */
	if (!has_priv_suser) {
		if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
			/* setgid? */
			if (vap->va_mode & S_ISGID) {
				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
					goto out;
				}
				if (!ismember) {
					KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
					error = EPERM;
					goto out;
				}
			}

			/* setuid? */
			if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
				KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
				error = EPERM;
				goto out;
			}
		}
		if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
			KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
			error = EPERM;
			goto out;
		}
		if (!defaulted_group) {
			if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
				KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
				goto out;
			}
			if (!ismember) {
				KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
				error = EPERM;
				goto out;
			}
		}

		/* initialising owner/group UUID */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
				KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
				/* XXX ENOENT here - no GUID - should perhaps become EPERM */
				goto out;
			}
			if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
				KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
				error = EPERM;
				goto out;
			}
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
				KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
				goto out;
			}
			if (!ismember) {
				KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
				error = EPERM;
				goto out;
			}
		}
	}
out:
	if (defaulted_fieldsp) {
		if (defaulted_mode) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
		}
		if (defaulted_group) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
		}
		if (defaulted_owner) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
		}
	}
	return (error);
}
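
/*
 * Illustrative sketch (not part of the original source): how a caller that
 * received the defaulted-fields mask from vnode_authattr_new_internal() above
 * might strip a defaulted mode again rather than fail the create.  The helper
 * name and policy are hypothetical; only the VATTR_* macros and the
 * VATTR_PREPARE_DEFAULTED_* bits come from this file.
 */
#if 0	/* example only - not compiled */
static void
example_strip_defaulted_mode(struct vnode_attr *vap, uint32_t defaulted_fields)
{
	/* if the mode was merely defaulted from the umask, drop it instead of failing */
	if (defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE)
		VATTR_CLEAR_ACTIVE(vap, va_mode);
}
#endif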
/*
 * Check that the attribute information in vap can be legally written by the
 * context.
 *
 * Call this when you're not sure about the vnode_attr; either its contents
 * have come from an unknown source, or when they are variable.
 *
 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
 * must be authorized to be permitted to write the vattr.
 */
int
vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
{
	struct vnode_attr ova;
	kauth_action_t	required_action;
	int		error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
	guid_t		changer;
	gid_t		group;
	uid_t		owner;
	mode_t		newmode;
	kauth_cred_t	cred;
	uint32_t	fdelta;

	VATTR_INIT(&ova);
	required_action = 0;
	error = 0;

	/*
	 * Quickly check for enforcement applicability.
	 */
	if (vfs_authopaque(vp->v_mount))
		goto out;

	/*
	 * Check for attempts to set nonsensical fields.
	 */
	if (vap->va_active & VNODE_ATTR_RDONLY) {
		KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
		error = EINVAL;
		goto out;
	}

	/*
	 * We need to know if the caller is the superuser.
	 */
	cred = vfs_context_ucred(ctx);
	has_priv_suser = kauth_cred_issuser(cred);

	/*
	 * If any of the following are changing, we need information from the old file:
	 * va_uid, va_gid, va_mode, va_uuuid, va_guuid.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid) ||
	    VATTR_IS_ACTIVE(vap, va_gid) ||
	    VATTR_IS_ACTIVE(vap, va_mode) ||
	    VATTR_IS_ACTIVE(vap, va_uuuid) ||
	    VATTR_IS_ACTIVE(vap, va_guuid)) {
		VATTR_WANTED(&ova, va_mode);
		VATTR_WANTED(&ova, va_uid);
		VATTR_WANTED(&ova, va_gid);
		VATTR_WANTED(&ova, va_uuuid);
		VATTR_WANTED(&ova, va_guuid);
		KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
	}

	/*
	 * If timestamps are being changed, we need to know who the file is owned
	 * by.
	 */
	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
	    VATTR_IS_ACTIVE(vap, va_change_time) ||
	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
	    VATTR_IS_ACTIVE(vap, va_access_time) ||
	    VATTR_IS_ACTIVE(vap, va_backup_time)) {

		VATTR_WANTED(&ova, va_uid);
#if 0	/* enable this when we support UUIDs as official owners */
		VATTR_WANTED(&ova, va_uuuid);
#endif
		KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
	}

	/*
	 * If flags are being changed, we need the old flags.
	 */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
		VATTR_WANTED(&ova, va_flags);
	}

	/*
	 * If ACLs are being changed, we need the old ACLs.
	 */
	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
		VATTR_WANTED(&ova, va_acl);
	}

	/*
	 * If the size is being set, make sure it's not a directory.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		/* size is meaningless on a directory, don't permit this */
		if (vnode_isdir(vp)) {
			KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
			error = EISDIR;
			goto out;
		}
	}

	KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
	if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
		KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
		goto out;
	}

	/*
	 * Size changes require write access to the file data.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		/* if we can't get the size, or it's different, we need write access */
		KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
		required_action |= KAUTH_VNODE_WRITE_DATA;
	}

	/*
	 * Changing timestamps?
	 *
	 * Note that we are only called to authorize user-requested time changes;
	 * side-effect time changes are not authorized.  Authorisation is only
	 * required for existing files.
	 *
	 * Non-owners are not permitted to change the time on an existing
	 * file to anything other than the current time.
	 */
	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
	    VATTR_IS_ACTIVE(vap, va_change_time) ||
	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
	    VATTR_IS_ACTIVE(vap, va_access_time) ||
	    VATTR_IS_ACTIVE(vap, va_backup_time)) {
		/*
		 * The owner and root may set any timestamps they like,
		 * provided that the file is not immutable.  The owner still needs
		 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
		 */
		if (has_priv_suser || vauth_node_owner(&ova, cred)) {
			KAUTH_DEBUG("ATTR - root or owner changing timestamps");
			required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
		} else {
			/* just setting the current time? */
			if (vap->va_vaflags & VA_UTIMES_NULL) {
				KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
				required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
			} else {
				KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
				error = EACCES;
				goto out;
			}
		}
	}

	/*
	 * Changing file mode?
	 */
	if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
		KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);

		/*
		 * Mode changes always have the same basic auth requirements.
		 */
		if (has_priv_suser) {
			KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
		} else {
			/* need WRITE_SECURITY */
			KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
			required_action |= KAUTH_VNODE_WRITE_SECURITY;
		}

		/*
		 * Can't set the setgid bit if you're not in the group and not root.  Have to have
		 * existing group information in the case we're not setting it right now.
		 */
		if (vap->va_mode & S_ISGID) {
			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;	/* always required */
			if (!has_priv_suser) {
				if (VATTR_IS_ACTIVE(vap, va_gid)) {
					group = vap->va_gid;
				} else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
					group = ova.va_gid;
				} else {
					KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
					error = EINVAL;
					goto out;
				}

				/*
				 * This might be too restrictive; WRITE_SECURITY might be implied by
				 * membership in this case, rather than being an additional requirement.
				 */
				if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
					goto out;
				}
				if (!ismember) {
					KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
					error = EPERM;
					goto out;
				}
			}
		}

		/*
		 * Can't set the setuid bit unless you're root or the file's owner.
		 */
		if (vap->va_mode & S_ISUID) {
			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;	/* always required */
			if (!has_priv_suser) {
				if (VATTR_IS_ACTIVE(vap, va_uid)) {
					owner = vap->va_uid;
				} else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
					owner = ova.va_uid;
				} else {
					KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
					error = EINVAL;
					goto out;
				}
				if (owner != kauth_cred_getuid(cred)) {
					/*
					 * We could allow this if WRITE_SECURITY is permitted, perhaps.
					 */
					KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
					error = EPERM;
					goto out;
				}
			}
		}
	}

	/*
	 * Validate/mask flags changes.  This checks that only the flags in
	 * the UF_SETTABLE mask are being set, and preserves the flags in
	 * the SF_SETTABLE case.
	 *
	 * Since flags changes may be made in conjunction with other changes,
	 * we will ask the auth code to ignore immutability in the case that
	 * the SF_* flags are not set and we are only manipulating the file flags.
	 */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		/* compute changing flags bits */
		if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
			fdelta = vap->va_flags ^ ova.va_flags;
		} else {
			fdelta = vap->va_flags;
		}

		if (fdelta != 0) {
			KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
			required_action |= KAUTH_VNODE_WRITE_SECURITY;

			/* check that changing bits are legal */
			if (has_priv_suser) {
				/*
				 * The immutability check will prevent us from clearing the SF_*
				 * flags unless the system securelevel permits it, so just check
				 * for legal flags here.
				 */
				if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
					error = EPERM;
					KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
					goto out;
				}
			} else {
				if (fdelta & ~UF_SETTABLE) {
					error = EPERM;
					KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
					goto out;
				}
			}
			/*
			 * If the caller has the ability to manipulate file flags,
			 * security is not reduced by ignoring them for this operation.
			 *
			 * A more complete test here would consider the 'after' states of the flags
			 * to determine whether it would permit the operation, but this becomes
			 * very complex.
			 *
			 * Ignoring immutability is conditional on securelevel; this does not bypass
			 * the SF_* flags if securelevel > 0.
			 */
			required_action |= KAUTH_VNODE_NOIMMUTABLE;
		}
	}

	/*
	 * Validate ownership information.
	 */
	chowner = 0;
	chgroup = 0;
	clear_suid = 0;
	clear_sgid = 0;

	/*
	 * uid changing
	 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
	 * support them in general, and will ignore it if/when we try to set it.
	 * We might want to clear the uid out of vap completely here.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
			if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
				KAUTH_DEBUG(" DENIED - non-superuser cannot change ownership to a third party");
				error = EPERM;
				goto out;
			}
			chowner = 1;
		}
		clear_suid = 1;
	}

	/*
	 * gid changing
	 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
	 * support them in general, and will ignore it if/when we try to set it.
	 * We might want to clear the gid out of vap completely here.
	 */
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
			if (!has_priv_suser) {
				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
					KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
					goto out;
				}
				if (!ismember) {
					KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
					    ova.va_gid, vap->va_gid);
					error = EPERM;
					goto out;
				}
			}
			chgroup = 1;
		}
		clear_sgid = 1;
	}

	/*
	 * Owner UUID being set or changed.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
		/* if the owner UUID is not actually changing ... */
		if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
			if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid))
				goto no_uuuid_change;

			/*
			 * If the current owner UUID is a null GUID, check
			 * it against the UUID corresponding to the owner UID.
			 */
			if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
			    VATTR_IS_SUPPORTED(&ova, va_uid)) {
				guid_t uid_guid;

				if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
				    kauth_guid_equal(&vap->va_uuuid, &uid_guid))
					goto no_uuuid_change;
			}
		}

		/*
		 * The owner UUID cannot be set by a non-superuser to anything other than
		 * their own or a null GUID (to "unset" the owner UUID).
		 * Note that file systems must be prepared to handle the
		 * null UUID case in a manner appropriate for that file
		 * system.
		 */
		if (!has_priv_suser) {
			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
				KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
				/* XXX ENOENT here - no UUID - should perhaps become EPERM */
				goto out;
			}
			if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
			    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
				KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
				error = EPERM;
				goto out;
			}
		}
		chowner = 1;
		clear_suid = 1;
	}
no_uuuid_change:

	/*
	 * Group UUID being set or changed.
	 */
	if (VATTR_IS_ACTIVE(vap, va_guuid)) {
		/* if the group UUID is not actually changing ... */
		if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
			if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid))
				goto no_guuid_change;

			/*
			 * If the current group UUID is a null UUID, check
			 * it against the UUID corresponding to the group GID.
			 */
			if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
			    VATTR_IS_SUPPORTED(&ova, va_gid)) {
				guid_t gid_guid;

				if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
				    kauth_guid_equal(&vap->va_guuid, &gid_guid))
					goto no_guuid_change;
			}
		}

		/*
		 * The group UUID cannot be set by a non-superuser to anything other than
		 * one of which they are a member or a null GUID (to "unset"
		 * the group UUID).
		 * Note that file systems must be prepared to handle the
		 * null UUID case in a manner appropriate for that file
		 * system.
		 */
		if (!has_priv_suser) {
			if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid))
				ismember = 1;
			else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
				KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
				goto out;
			}
			if (!ismember) {
				KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
				error = EPERM;
				goto out;
			}
		}
		chgroup = 1;
	}
no_guuid_change:

	/*
	 * Compute authorisation for group/ownership changes.
	 */
	if (chowner || chgroup || clear_suid || clear_sgid) {
		if (has_priv_suser) {
			KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
		} else {
			if (chowner) {
				KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
				required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
			}
			if (chgroup && !chowner) {
				KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
				required_action |= KAUTH_VNODE_WRITE_SECURITY;
			}
		}

		/* clear set-uid and set-gid bits as required by Posix */
		if (VATTR_IS_ACTIVE(vap, va_mode)) {
			newmode = vap->va_mode;
		} else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
			newmode = ova.va_mode;
		} else {
			KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
			newmode = 0;
		}
		if (newmode & (S_ISUID | S_ISGID)) {
			VATTR_SET(vap, va_mode, newmode & ~(S_ISUID | S_ISGID));
			KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", newmode, vap->va_mode);
		}
	}

	/*
	 * Authorise changes in the ACL.
	 */
	if (VATTR_IS_ACTIVE(vap, va_acl)) {

		/* no existing ACL */
		if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {

			/* adding an ACL */
			if (vap->va_acl != NULL) {
				required_action |= KAUTH_VNODE_WRITE_SECURITY;
				KAUTH_DEBUG("CHMOD - adding ACL");
			}

			/* removing an existing ACL */
		} else if (vap->va_acl == NULL) {
			required_action |= KAUTH_VNODE_WRITE_SECURITY;
			KAUTH_DEBUG("CHMOD - removing ACL");

			/* updating an existing ACL */
		} else {
			if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
				/* entry count changed, must be different */
				required_action |= KAUTH_VNODE_WRITE_SECURITY;
				KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
			} else if (vap->va_acl->acl_entrycount > 0) {
				/* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
				if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
				    sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
					required_action |= KAUTH_VNODE_WRITE_SECURITY;
					KAUTH_DEBUG("CHMOD - changing ACL entries");
				}
			}
		}
	}

	/*
	 * Other attributes that require authorisation.
	 */
	if (VATTR_IS_ACTIVE(vap, va_encoding))
		required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;

out:
	if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL))
		kauth_acl_free(ova.va_acl);
	if (error == 0)
		*actionp = required_action;
	return (error);
}
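
/*
 * Illustrative sketch (not part of the original source): the intended calling
 * pattern for vnode_authattr() above - compute the KAUTH_VNODE_* actions
 * needed to apply a vnode_attr, then authorize them in one shot.  The wrapper
 * function is hypothetical; vnode_authorize() is the existing VFS KPI.
 */
#if 0	/* example only - not compiled */
static int
example_setattr_auth(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_action_t action;
	int error;

	/* figure out which rights the caller must hold for this attribute change */
	if ((error = vnode_authattr(vp, vap, &action, ctx)) != 0)
		return (error);

	/* nothing to authorize (no effective change requested) */
	if (action == 0)
		return (0);

	/* authorize the accumulated actions against the vnode */
	return (vnode_authorize(vp, NULLVP, action, ctx));
}
#endif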
static int
setlocklocal_callback(struct vnode *vp, __unused void *cargs)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VLOCKLOCAL;
	vnode_unlock(vp);

	return (VNODE_RETURNED);
}

void
vfs_setlocklocal(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
	mount_unlock(mp);

	/*
	 * The number of active vnodes is expected to be
	 * very small when vfs_setlocklocal is invoked.
	 */
	vnode_iterate(mp, 0, setlocklocal_callback, NULL);
}
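
/*
 * Illustrative sketch (not part of the original source): a filesystem that
 * wants advisory byte-range locks serviced by the VFS layer (VLOCKLOCAL on
 * its vnodes) typically calls vfs_setlocklocal() from its mount entry point,
 * before handing out vnodes.  The mount routine shown here is hypothetical.
 */
#if 0	/* example only - not compiled */
static int
examplefs_mount(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
#pragma unused(devvp, data, ctx)
	/* ... filesystem-specific mount setup would go here ... */

	/* have advisory locks handled locally by the VFS advisory-lock code */
	vfs_setlocklocal(mp);
	return (0);
}
#endif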
void
vfs_setcompoundopen(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
	mount_unlock(mp);
}

void
vnode_setswapmount(vnode_t vp)
{
	mount_lock(vp->v_mount);
	vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
	mount_unlock(vp->v_mount);
}
void
vn_setunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISUNION;
	vnode_unlock(vp);
}

void
vn_checkunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	while ((vp->v_flag & VISUNION) == VISUNION)
		msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
	vnode_unlock(vp);
}

void
vn_clearunionwait(vnode_t vp, int locked)
{
	if (!locked)
		vnode_lock_spin(vp);
	if ((vp->v_flag & VISUNION) == VISUNION) {
		vp->v_flag &= ~VISUNION;
		wakeup((caddr_t)&vp->v_flag);
	}
	if (!locked)
		vnode_unlock(vp);
}
/*
 * Removes orphaned apple double files during a rmdir
 * Works by:
 * 1. vnode_suspend().
 * 2. Call VNOP_READDIR() till the end of directory is reached.
 * 3. Check if the directory entries returned are regular files with name starting with "._".  If not, return ENOTEMPTY.
 * 4. Continue (2) and (3) till end of directory is reached.
 * 5. If all the entries in the directory were files with "._" name, delete all the files.
 * 6. vnode_resume().
 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
 */
errno_t
rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int *restart_flag)
{

#define UIO_BUFF_SIZE 2048
	uio_t auio = NULL;
	int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
	int open_flag = 0, full_erase_flag = 0;
	char uio_buf[ UIO_SIZEOF(1) ];
	caddr_t rbuf = NULL;
	void *dir_pos;
	void *dir_end;
	struct dirent *dp;
	struct nameidata nd_temp;
	errno_t error;

	error = vnode_suspend(vp);

	/*
	 * restart_flag is set so that the calling rmdir sleeps and resets
	 */
	if (error == EBUSY)
		*restart_flag = 1;
	if (error != 0)
		goto outsc;

	MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK);
	if (rbuf)
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
		    &uio_buf[0], sizeof(uio_buf));
	if (!rbuf || !auio) {
		error = ENOMEM;
		goto outsc;
	}

	uio_setoffset(auio, 0);

	eofflag = 0;

	if ((error = VNOP_OPEN(vp, FREAD, ctx)))
		goto outsc;
	else
		open_flag = 1;

	/*
	 * First pass checks if all files are appleDouble files.
	 */
	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
			goto outsc;

		if (uio_resid(auio) != 0)
			siz -= uio_resid(auio);

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) (dir_pos);

		if (dir_pos == dir_end)
			eofflag = 1;

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			      (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
				/*
				 * Check for irregular files and ._ files
				 * If there is a ._._ file abort the op
				 */
				if (dp->d_namlen < 2 ||
				    strncmp(dp->d_name, "._", 2) ||
				    (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
					error = ENOTEMPTY;
					goto outsc;
				}
			}
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2)
			eofflag = 0;

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			}
			else if (!eofflag && full_erase_flag)
				full_erase_flag = 0;
		}

	} while (!eofflag);

	/*
	 * If we've made it here all the files in the dir are ._ files.
	 * We can delete the files even though the node is suspended
	 * because we are the owner of the file.
	 */
	uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
	eofflag = 0;
	full_erase_flag = 0;

	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);

		if (error != 0)
			goto outsc;

		if (uio_resid(auio) != 0)
			siz -= uio_resid(auio);

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) dir_pos;

		if (dir_pos == dir_end)
			eofflag = 1;

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			      (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
			    ) {

				NDINIT(&nd_temp, DELETE, OP_UNLINK, USEDVP,
				    UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name),
				    ctx);
				nd_temp.ni_dvp = vp;
				error = unlink1(ctx, &nd_temp, VNODE_REMOVE_SKIP_NAMESPACE_EVENT);

				if (error && error != ENOENT) {
					goto outsc;
				}

			}
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2)
			eofflag = 0;

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			}
			else if (!eofflag && full_erase_flag)
				full_erase_flag = 0;
		}

	} while (!eofflag);

	error = 0;

outsc:
	if (open_flag)
		VNOP_CLOSE(vp, FREAD, ctx);

	if (rbuf)
		FREE(rbuf, M_TEMP);

	vnode_resume(vp);

	return (error);
}
void
lock_vnode_and_post(vnode_t vp, int kevent_num)
{
	/* Only take the lock if there's something there! */
	if (vp->v_knotes.slh_first != NULL) {
		vnode_lock(vp);
		KNOTE(&vp->v_knotes, kevent_num);
		vnode_unlock(vp);
	}
}
#ifdef PANIC_PRINTS_VNODES

void panic_print_vnodes(void);

static const char *__vtype(uint16_t vtype)
{
	switch (vtype) {
	case VREG:
		return "R";
	case VDIR:
		return "D";
	case VBLK:
		return "B";
	case VCHR:
		return "C";
	case VLNK:
		return "L";
	case VSOCK:
		return "S";
	case VFIFO:
		return "F";
	case VBAD:
		return "x";
	default:
		return "?";
	}
}

/*
 * build a path from the bottom up
 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
 */
static char *__vpath(vnode_t vp, char *str, int len, int depth)
{
	int vnm_len;
	char *src, *dst;

	/* str + len is the start of the string we created */
	if (!vp->v_name)
		return str + len;

	/* follow mount vnodes to get the full path */
	if ((vp->v_flag & VROOT)) {
		if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
			return __vpath(vp->v_mount->mnt_vnodecovered,
			    str, len, depth + 1);
		}
		return str + len;
	}

	src = (char *)vp->v_name;
	vnm_len = strlen(src);
	if (vnm_len > len) {
		/* truncate the name to fit in the string */
		src += (vnm_len - len);
		vnm_len = len;
	}

	/* start from the back and copy just characters (no NULLs) */

	/* this will chop off leaf path (file) names */
	if (depth > 0) {
		dst = str + len - vnm_len;
		memcpy(dst, src, vnm_len);
		len -= vnm_len;
	} else {
		dst = str + len;
	}

	if (vp->v_parent && len > 1) {
		/* follow parents up the chain */
		len--;
		*(dst-1) = '/';
		return __vpath(vp->v_parent, str, len, depth + 1);
	}

	return dst;
}

extern int kdb_printf(const char *format, ...) __printflike(1,2);

#define SANE_VNODE_PRINT_LIMIT	5000
void panic_print_vnodes(void)
{
	mount_t mnt;
	vnode_t vp;
	int nvnodes = 0;
	const char *type;
	char *nm;
	char vname[257];

	kdb_printf("\n***** VNODES *****\n"
	    "TYPE UREF ICNT PATH\n");

	/* NULL-terminate the path name */
	vname[sizeof(vname)-1] = '\0';

	/*
	 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
	 */
	TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
		TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
			if (++nvnodes > SANE_VNODE_PRINT_LIMIT)
				return;
			type = __vtype(vp->v_type);
			nm = __vpath(vp, vname, sizeof(vname)-1, 0);
			kdb_printf("%s %0d %0d %s\n",
			    type, vp->v_usecount, vp->v_iocount, nm);
		}
	}
}

#else /* !PANIC_PRINTS_VNODES */
void panic_print_vnodes(void)
{
	return;
}
#endif
static void record_vp(vnode_t vp, int count) {
	struct uthread *ut;

	if ((vp->v_flag & VSYSTEM))
		return;

	ut = get_bsdthread_info(current_thread());
	ut->uu_iocount += count;

	if (count == 1) {
		if (ut->uu_vpindex < 32) {
			OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);

			ut->uu_vps[ut->uu_vpindex] = vp;
			ut->uu_vpindex++;
		}
	}
}
#if CONFIG_TRIGGERS

#define TRIG_DEBUG 0

#if TRIG_DEBUG
#define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
#else
#define TRIG_LOG(...)
#endif

/*
 * Resolver result functions
 */

resolver_result_t
vfs_resolver_result(uint32_t seq, enum resolver_status stat, int aux)
{
	/*
	 * |<---   32   --->|<---  28  --->|<- 4 ->|
	 *      sequence        auxiliary    status
	 */
	return (((uint64_t)seq) << 32) |
	       (((uint64_t)(aux & 0x0fffffff)) << 4) |
	       (uint64_t)(stat & 0x0000000F);
}

enum resolver_status
vfs_resolver_status(resolver_result_t result)
{
	/* lower 4 bits is status */
	return (result & 0x0000000F);
}

uint32_t
vfs_resolver_sequence(resolver_result_t result)
{
	/* upper 32 bits is sequence */
	return (uint32_t)(result >> 32);
}

int
vfs_resolver_auxiliary(resolver_result_t result)
{
	/* 28 bits of auxiliary */
	return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
}
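
/*
 * Illustrative sketch (not part of the original source): packing and unpacking
 * a resolver_result_t with the helpers defined above.  A resolver bumps its
 * sequence number on every state change; the auxiliary value carries an errno
 * when the status is RESOLVER_ERROR.  The variable names are hypothetical.
 */
#if 0	/* example only - not compiled */
static void
example_resolver_result_roundtrip(void)
{
	uint32_t my_seq = 42;
	resolver_result_t res;

	/* report a failed resolve attempt, carrying ENOENT as auxiliary data */
	res = vfs_resolver_result(my_seq, RESOLVER_ERROR, ENOENT);

	/* the three accessors recover exactly what was packed */
	assert(vfs_resolver_status(res) == RESOLVER_ERROR);
	assert(vfs_resolver_sequence(res) == my_seq);
	assert(vfs_resolver_auxiliary(res) == ENOENT);
}
#endif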
/*
 * Call in for resolvers to update vnode trigger state
 */
int
vnode_trigger_update(vnode_t vp, resolver_result_t result)
{
	vnode_resolve_t rp;
	uint32_t seq;
	enum resolver_status stat;

	if (vp->v_resolve == NULL) {
		return (EINVAL);
	}

	stat = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
		return (EINVAL);
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	if (seq > rp->vr_lastseq) {
		if (stat == RESOLVER_RESOLVED)
			rp->vr_flags |= VNT_RESOLVED;
		else
			rp->vr_flags &= ~VNT_RESOLVED;

		rp->vr_lastseq = seq;
	}

	lck_mtx_unlock(&rp->vr_lock);

	return (0);
}
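
/*
 * Illustrative sketch (not part of the original source): how an external
 * resolver might report that it finished mounting something on a trigger
 * vnode.  The sequence counter and surrounding function are hypothetical;
 * vnode_trigger_update() and vfs_resolver_result() are defined above.
 */
#if 0	/* example only - not compiled */
static void
example_report_resolved(vnode_t trigger_vp, uint32_t *seqp)
{
	resolver_result_t res;

	/* a newer sequence number is required for the state change to stick */
	res = vfs_resolver_result(++(*seqp), RESOLVER_RESOLVED, 0);
	(void) vnode_trigger_update(trigger_vp, res);
}
#endif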
static int
vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
{
	int error;

	vnode_lock_spin(vp);
	if (vp->v_resolve != NULL) {
		vnode_unlock(vp);
		return (EINVAL);
	} else {
		vp->v_resolve = rp;
	}
	vnode_unlock(vp);

	if (ref) {
		error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
		if (error != 0) {
			panic("VNODE_REF_FORCE didn't help...");
		}
	}

	return (0);
}
/*
 * VFS internal interfaces for vnode triggers
 *
 * vnode must already have an io count on entry
 * v_resolve is stable when io count is non-zero
 */
static int
vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
{
	vnode_resolve_t rp;
	int result;
	char byte;

	/* minimum pointer test (debugging) */
	if (tinfo->vnt_data)
		byte = *((char *)tinfo->vnt_data);

	MALLOC(rp, vnode_resolve_t, sizeof(*rp), M_TEMP, M_WAITOK);
	if (rp == NULL)
		return (ENOMEM);

	lck_mtx_init(&rp->vr_lock, trigger_vnode_lck_grp, trigger_vnode_lck_attr);

	rp->vr_resolve_func = tinfo->vnt_resolve_func;
	rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
	rp->vr_rearm_func = tinfo->vnt_rearm_func;
	rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
	rp->vr_data = tinfo->vnt_data;
	rp->vr_lastseq = 0;
	rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
	if (external) {
		rp->vr_flags |= VNT_EXTERNAL;
	}

	result = vnode_resolver_attach(vp, rp, external);
	if (result != 0) {
		goto out;
	}

	if (mp) {
		OSAddAtomic(1, &mp->mnt_numtriggers);
	}

	return (result);

out:
	FREE(rp, M_TEMP);
	return (result);
}
static void
vnode_resolver_release(vnode_resolve_t rp)
{
	/*
	 * Give them a chance to free any private data
	 */
	if (rp->vr_data && rp->vr_reclaim_func) {
		rp->vr_reclaim_func(NULLVP, rp->vr_data);
	}

	lck_mtx_destroy(&rp->vr_lock, trigger_vnode_lck_grp);
	FREE(rp, M_TEMP);
}
/* Called after the vnode has been drained */
static void
vnode_resolver_detach(vnode_t vp)
{
	vnode_resolve_t rp;
	mount_t	mp;

	mp = vnode_mount(vp);

	vnode_lock(vp);
	rp = vp->v_resolve;
	vp->v_resolve = NULL;
	vnode_unlock(vp);

	if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
		vnode_rele_ext(vp, O_EVTONLY, 1);
	}

	vnode_resolver_release(rp);

	/* Keep count of active trigger vnodes per mount */
	OSAddAtomic(-1, &mp->mnt_numtriggers);
}
void
vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_rearm_func == NULL) ||
	    (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
		return;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/*
	 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
	 */
	if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	/* Check if this vnode is already armed */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	lck_mtx_unlock(&rp->vr_lock);

	result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED)
			rp->vr_flags &= ~VNT_RESOLVED;
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);
}
int
vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	enum path_operation op;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/* Only trigger on topmost vnodes */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_resolve_func == NULL) ||
	    (vp->v_mountedhere != NULL)) {
		return (0);
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if (rp->vr_flags & VNT_RESOLVED) {
		lck_mtx_unlock(&rp->vr_lock);
		return (0);
	}

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * is there anyway to know this???
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 *
	 * XXX - should we use ISLASTCN to pick the op value???  Perhaps only leafs should
	 * get the richer set and non-leafs should get generic OP_LOOKUP?  TBD
	 */
	op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op : OP_LOOKUP;

	result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_RESOLVED)
			rp->vr_flags |= VNT_RESOLVED;
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return (status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0);
}
static int
vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
		return (0);
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		printf("vnode_trigger_unresolve: not currently resolved\n");
		lck_mtx_unlock(&rp->vr_lock);
		return (0);
	}

	rp->vr_flags |= VNT_VFS_UNMOUNTED;

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 */
	result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED)
			rp->vr_flags &= ~VNT_RESOLVED;
		rp->vr_lastseq = seq;
	}
	rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return (status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0);
}
static int
triggerisdescendant(mount_t mp, mount_t rmp)
{
	int match = FALSE;

	/*
	 * walk up vnode covered chain looking for a match
	 */
	name_cache_lock_shared();

	while (1) {
		vnode_t vp;

		/* did we encounter "/" ? */
		if (mp->mnt_flag & MNT_ROOTFS)
			break;

		vp = mp->mnt_vnodecovered;
		if (vp == NULLVP)
			break;

		mp = vp->v_mount;
		if (mp == rmp) {
			match = TRUE;
			break;
		}
	}

	name_cache_unlock();

	return (match);
}
struct trigger_unmount_info {
	vfs_context_t	ctx;
	mount_t		top_mp;
	vnode_t		trigger_vp;
	mount_t		trigger_mp;
	uint32_t	trigger_vid;
	int		flags;
};

static int
trigger_unmount_callback(mount_t mp, void * arg)
{
	struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
	boolean_t mountedtrigger = FALSE;

	/*
	 * When we encounter the top level mount we're done
	 */
	if (mp == infop->top_mp)
		return (VFS_RETURNED_DONE);

	if ((mp->mnt_vnodecovered == NULL) ||
	    (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
		return (VFS_RETURNED);
	}

	if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
	    (mp->mnt_vnodecovered->v_resolve != NULL) &&
	    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
		mountedtrigger = TRUE;
	}
	vnode_put(mp->mnt_vnodecovered);

	/*
	 * When we encounter a mounted trigger, check if it's under the top level mount
	 */
	if ( !mountedtrigger || !triggerisdescendant(mp, infop->top_mp) )
		return (VFS_RETURNED);

	/*
	 * Process any pending nested mount (now that it's not referenced)
	 */
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
		vnode_t vp = infop->trigger_vp;
		int error;

		infop->trigger_vp = NULLVP;

		if (mp == vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: unexpected match '%s'\n",
			    mp->mnt_vfsstat.f_mntonname);
			return (VFS_RETURNED);
		}
		if (infop->trigger_mp != vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
			    infop->trigger_mp, vp->v_mountedhere);
			goto savenext;
		}

		error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
		vnode_put(vp);
		if (error) {
			printf("unresolving: '%s', err %d\n",
			    vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
			    "???", error);
			return (VFS_RETURNED_DONE); /* stop iteration on errors */
		}
	}
savenext:
	/*
	 * We can't call resolver here since we hold a mount iter
	 * ref on mp so save its covered vp for later processing
	 */
	infop->trigger_vp = mp->mnt_vnodecovered;
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithref(infop->trigger_vp) == 0)) {
		if (infop->trigger_vp->v_mountedhere == mp) {
			infop->trigger_vid = infop->trigger_vp->v_id;
			infop->trigger_mp = mp;
		}
		vnode_put(infop->trigger_vp);
	}

	return (VFS_RETURNED);
}
/*
 * Attempt to unmount any trigger mounts nested underneath a mount.
 * This is a best effort attempt and no retries are performed here.
 *
 * Note: mp->mnt_rwlock is held exclusively on entry (so be careful)
 */
void
vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
{
	struct trigger_unmount_info info;

	/* Must have trigger vnodes */
	if (mp->mnt_numtriggers == 0) {
		return;
	}
	/* Avoid recursive requests (by checking covered vnode) */
	if ((mp->mnt_vnodecovered != NULL) &&
	    (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
		boolean_t recursive = FALSE;

		if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
		    (mp->mnt_vnodecovered->v_resolve != NULL) &&
		    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
			recursive = TRUE;
		}
		vnode_put(mp->mnt_vnodecovered);
		if (recursive)
			return;
	}

	/*
	 * Attempt to unmount any nested trigger mounts (best effort)
	 */
	info.ctx = ctx;
	info.top_mp = mp;
	info.trigger_vp = NULLVP;
	info.trigger_vid = 0;
	info.trigger_mp = NULL;
	info.flags = flags;

	(void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);

	/*
	 * Process remaining nested mount (now that it's not referenced)
	 */
	if ((info.trigger_vp != NULLVP) &&
	    (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
		vnode_t vp = info.trigger_vp;

		if (info.trigger_mp == vp->v_mountedhere) {
			(void) vnode_trigger_unresolve(vp, flags, ctx);
		}
		vnode_put(vp);
	}
}
int
vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
{
	int res;
	vnode_t rvp, vp;
	struct nameidata nd;
	struct vnode_trigger_param vtp;

	/*
	 * Must be called for trigger callback, wherein rwlock is held
	 */
	lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);

	TRIG_LOG("Adding trigger at %s\n", relpath);
	TRIG_LOG("Trying VFS_ROOT\n");

	/*
	 * We do a lookup starting at the root of the mountpoint, unwilling
	 * to cross into other mountpoints.
	 */
	res = VFS_ROOT(mp, &rvp, ctx);
	if (res != 0) {
		goto out;
	}

	TRIG_LOG("Trying namei\n");

	NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(relpath), ctx);
	nd.ni_dvp = rvp;
	res = namei(&nd);
	if (res != 0) {
		vnode_put(rvp);
		goto out;
	}

	vp = nd.ni_vp;
	nameidone(&nd);
	vnode_put(rvp);

	TRIG_LOG("Trying vnode_resolver_create()\n");

	/*
	 * Set up blob.  vnode_create() takes a larger structure
	 * with creation info, and we needed something different
	 * for this case.  One needs to win, or we need to munge both;
	 * vnode_create() wins.
	 */
	bzero(&vtp, sizeof(vtp));
	vtp.vnt_resolve_func = vtip->vti_resolve_func;
	vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
	vtp.vnt_rearm_func = vtip->vti_rearm_func;
	vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
	vtp.vnt_data = vtip->vti_data;
	vtp.vnt_flags = vtip->vti_flags;

	res = vnode_resolver_create(mp, vp, &vtp, TRUE);
	vnode_put(vp);
out:
	TRIG_LOG("Returning %d\n", res);
	return (res);
}
#endif /* CONFIG_TRIGGERS */