/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
+#include <sys/systm.h>
#include <sys/kern_memorystatus.h>
+#include <sys/lockf.h>
#include <miscfs/fifofs/fifo.h>
#include <string.h>
#include <kern/assert.h>
+#include <mach/kern_return.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
#include <miscfs/specfs/specdev.h>
#include <kern/kalloc.h> /* kalloc()/kfree() */
#include <kern/clock.h> /* delay_for_interval() */
#include <libkern/OSAtomic.h> /* OSAddAtomic() */
-
+#include <console/video_console.h>
#ifdef JOE_DEBUG
#include <libkern/OSDebug.h>
memory_object_control_t control,
boolean_t rage);
+extern void memory_object_mark_io_tracking(
+ memory_object_control_t control);
/* XXX next prototype should be from <nfs/nfs.h> */
extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);
__private_extern__ void vntblinit(void);
__private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1,
unsigned int val2);
-__private_extern__ int unlink1(vfs_context_t, struct nameidata *, int);
+__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
+ enum uio_seg, int);
extern int system_inshutdown;
static void vnode_list_add(vnode_t);
+static void vnode_async_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);
+static void vnode_abort_advlocks(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static int vnode_reload(vnode_t);
static int vnode_isinuse_locked(vnode_t, int, int);
+static int unmount_callback(mount_t, __unused void *);
+
static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int );
static void record_vp(vnode_t vp, int count);
#endif
+#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
+extern int bootarg_no_vnode_jetsam; /* from bsd_init.c, default value is 0 */
+#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
+
+boolean_t root_is_CF_drive = FALSE;
+
#if CONFIG_TRIGGERS
static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
static void vnode_resolver_detach(vnode_t);
TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
+TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;
+
TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
struct timeval rage_tv;
} while(0)
-
/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp) \
do { \
} while(0)
+/* remove a vnode from async work vnode list */
+#define VREMASYNC_WORK(fun, vp) \
+ do { \
+ VLISTCHECK((fun), (vp), "async_work"); \
+ TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
+ VLISTNONE((vp)); \
+ (vp)->v_listflag &= ~VLIST_ASYNC_WORK; \
+ async_work_vnodes--; \
+ } while(0)
+
+
/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp) \
do { \
*/
#define VNODE_FREE_MIN CONFIG_VNODE_FREE_MIN /* freelist should have at least this many */
+
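+/*
+ * Continuation for the async-work thread created in vntblinit(); it
+ * reclaims vnodes queued on the vnode_async_work_list.
+ */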
+static void async_work_continue(void);
+
/*
* Initialize the vnode management data structures.
*/
__private_extern__ void
vntblinit(void)
{
+ thread_t thread = THREAD_NULL;
+
TAILQ_INIT(&vnode_free_list);
TAILQ_INIT(&vnode_rage_list);
TAILQ_INIT(&vnode_dead_list);
+ TAILQ_INIT(&vnode_async_work_list);
TAILQ_INIT(&mountlist);
if (!vnodetarget)
* we want to cache
*/
(void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN);
+
+ /*
+ * create worker threads
+ */
+ kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
+ thread_deallocate(thread);
}
/* Reset the VM Object Cache with the values passed in */
vnode_t vp;
TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
- /* disable preflight only for udf, a hack to be removed after 4073176 is fixed */
- if (vp->v_tag == VT_UDF)
- return 0;
if (vp->v_type == VDIR)
continue;
if (vp == skipvp)
continue;
- if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
- (vp->v_flag & VNOFLUSH)))
+ if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH)))
continue;
if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
continue;
- if ((flags & WRITECLOSE) &&
- (vp->v_writecount == 0 || vp->v_type != VREG))
+ if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG))
continue;
+
/* Look for busy vnode */
- if (((vp->v_usecount != 0) &&
- ((vp->v_usecount - vp->v_kusecount) != 0)))
- return(1);
+ if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
+ return 1;
+
+ } else if (vp->v_iocount > 0) {
+ /* Busy if iocount is > 0 for more than 3 seconds */
+ tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
+ if (vp->v_iocount > 0)
+ return 1;
+ continue;
}
+ }
- return(0);
+ return 0;
}
/*
}
+#include <i386/panic_hooks.h>
+
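+/*
+ * Panic hook armed around the vnode_iterate() loop: if the system
+ * panics mid-iteration, log the mount and current vnode (plus nearby
+ * memory) so the state of the stalled iteration is captured.
+ */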
+struct vnode_iterate_panic_hook {
+ panic_hook_t hook;
+ mount_t mp;
+ struct vnode *vp;
+};
+
+static void vnode_iterate_panic_hook(panic_hook_t *hook_)
+{
+ extern int kdb_log(const char *fmt, ...);
+ struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
+ panic_phys_range_t range;
+ uint64_t phys;
+
+ if (panic_phys_range_before(hook->mp, &phys, &range)) {
+ kdb_log("mp = %p, phys = %p, prev (%p: %p-%p)\n",
+ hook->mp, phys, range.type, range.phys_start,
+ range.phys_start + range.len);
+ } else {
+ kdb_log("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
+ }
+
+ if (panic_phys_range_before(hook->vp, &phys, &range)) {
+ kdb_log("vp = %p, phys = %p, prev (%p: %p-%p)\n",
+ hook->vp, phys, range.type, range.phys_start,
+ range.phys_start + range.len);
+ } else {
+ kdb_log("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
+ }
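+ /* dump 12KB of memory around the mount structure into the panic log */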
+ panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
+}
+
int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
void *arg)
mount_unlock(mp);
return(ret);
}
-
+
+ struct vnode_iterate_panic_hook hook;
+ hook.mp = mp;
+ hook.vp = NULL;
+ panic_hook(&hook.hook, vnode_iterate_panic_hook);
/* iterate over all the vnodes */
while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
vp = TAILQ_FIRST(&mp->mnt_workerqueue);
+ hook.vp = vp;
TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
vid = vp->v_id;
}
out:
+ panic_unhook(&hook.hook);
(void)vnode_iterate_reloadq(mp);
vnode_iterate_clear(mp);
mount_unlock(mp);
restart:
if (mp->mnt_lflag & MNT_LDEAD)
- return(ENOENT);
+ return (ENOENT);
- if (mp->mnt_lflag & MNT_LUNMOUNT) {
- if (flags & LK_NOWAIT)
- return (ENOENT);
-
- mount_lock(mp);
+ mount_lock(mp);
- if (mp->mnt_lflag & MNT_LDEAD) {
+ if (mp->mnt_lflag & MNT_LUNMOUNT) {
+ if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
mount_unlock(mp);
- return(ENOENT);
- }
- if (mp->mnt_lflag & MNT_LUNMOUNT) {
- mp->mnt_lflag |= MNT_LWAIT;
- /*
- * Since all busy locks are shared except the exclusive
- * lock granted when unmounting, the only place that a
- * wakeup needs to be done is at the release of the
- * exclusive lock at the end of dounmount.
- */
- msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
return (ENOENT);
}
- mount_unlock(mp);
+
+ /*
+ * Since all busy locks are shared except the exclusive
+ * lock granted when unmounting, the only place that a
+ * wakeup needs to be done is at the release of the
+ * exclusive lock at the end of dounmount.
+ */
+ mp->mnt_lflag |= MNT_LWAIT;
+ msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
+ return (ENOENT);
}
+ mount_unlock(mp);
+
lck_rw_lock_shared(&mp->mnt_rwlock);
/*
- * until we are granted the rwlock, it's possible for the mount point to
- * change state, so reevaluate before granting the vfs_busy
+ * Until we are granted the rwlock, it's possible for the mount point to
+ * change state, so re-evaluate before granting the vfs_busy.
*/
if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
lck_rw_done(&mp->mnt_rwlock);
/*
* Free a busy filesystem.
*/
-
void
vfs_unbusy(mount_t mp)
{
vfsp->vfc_refcount++;
mount_list_unlock();
- strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
+ strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
mp->mnt_vfsstat.f_mntonname[0] = '/';
/* XXX const poisoning layering violation */
(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);
*/
vfs_init_io_attributes(rootvp, mp);
+ if ((mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) &&
+ (mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) {
+ /*
+ * only for CF (CoreStorage Fusion) drives
+ */
+ root_is_CF_drive = TRUE;
+ }
/*
* Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
*/
mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
}
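+ /*
+ * Tune the console progress animation to the class of the root
+ * device: virtual devices are treated as slowest, SSDs as fastest.
+ */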
+ uint32_t speed;
+
+ if (MNTK_VIRTUALDEV & mp->mnt_kern_flag)
+  speed = 128;
+ else if (MNTK_SSD & mp->mnt_kern_flag)
+  speed = 7 * 256;
+ else
+  speed = 256;
+ vc_progress_setdiskspeed(speed);
/*
* Probe root file system for additional features.
*/
* Routines having to do with the management of the vnode table.
*/
extern int (**dead_vnodeop_p)(void *);
-long numvnodes, freevnodes, deadvnodes;
+long numvnodes, freevnodes, deadvnodes, async_work_vnodes;
+
+int async_work_timed_out = 0;
+int async_work_handled = 0;
+int dead_vnode_wanted = 0;
+int dead_vnode_waited = 0;
/*
* Move a vnode from one mount queue to another.
return (0);
}
+
/*
* Check to see if the new vnode represents a special device
* for which we already have a vnode (either because of
nvp->v_specflags = 0;
nvp->v_speclastr = -1;
nvp->v_specinfo->si_opencount = 0;
+ nvp->v_specinfo->si_initted = 0;
+ nvp->v_specinfo->si_throttleable = 0;
SPECHASH_LOCK();
}
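+
+/*
+ * A vnode is considered "reliable" when it is backed by local,
+ * non-virtual storage; vnodes on other media (and dirty vnodes) are
+ * handed to the async worker since reclaiming them may block.
+ */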
+boolean_t
+vnode_on_reliable_media(vnode_t vp)
+{
+ if ( !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && (vp->v_mount->mnt_flag & MNT_LOCAL) )
+ return (TRUE);
+ return (FALSE);
+}
+
+static void
+vnode_async_list_add(vnode_t vp)
+{
+ vnode_list_lock();
+
+ if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE|VL_DEAD)))
+ panic("vnode_async_list_add: %p is in wrong state", vp);
+
+ TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
+ vp->v_listflag |= VLIST_ASYNC_WORK;
+
+ async_work_vnodes++;
+
+ vnode_list_unlock();
+
+ wakeup(&vnode_async_work_list);
+}
+
+
/*
* put the vnode on appropriate free list.
* called with vnode LOCKED
static void
vnode_list_add(vnode_t vp)
{
+ boolean_t need_dead_wakeup = FALSE;
+
#if DIAGNOSTIC
lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
+
+again:
+
/*
* if it is already on a list or non zero references return
*/
if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
return;
+ /*
+ * In vclean, we might have deferred ditching locked buffers
+ * because something was still referencing them (indicated by
+ * usecount). We can ditch them now.
+ */
+ if (ISSET(vp->v_lflag, VL_DEAD)
+ && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
+ ++vp->v_iocount; // Probably not necessary, but harmless
+#ifdef JOE_DEBUG
+ record_vp(vp, 1);
+#endif
+ vnode_unlock(vp);
+ buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
+ vnode_lock(vp);
+ vnode_dropiocount(vp);
+ goto again;
+ }
+
vnode_list_lock();
if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
vp->v_listflag |= VLIST_DEAD;
deadvnodes++;
- } else if ((vp->v_flag & VAGE)) {
+
+ if (dead_vnode_wanted) {
+ dead_vnode_wanted--;
+ need_dead_wakeup = TRUE;
+ }
+
+ } else if ( (vp->v_flag & VAGE) ) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
vp->v_flag &= ~VAGE;
freevnodes++;
}
}
vnode_list_unlock();
+
+ if (need_dead_wakeup == TRUE)
+ wakeup_one((caddr_t)&dead_vnode_wanted);
}
VREMRAGE("vnode_list_remove", vp);
else if (vp->v_listflag & VLIST_DEAD)
VREMDEAD("vnode_list_remove", vp);
+ else if (vp->v_listflag & VLIST_ASYNC_WORK)
+ VREMASYNC_WORK("vnode_list_remove", vp);
else
VREMFREE("vnode_list_remove", vp);
}
panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
if (fmode & FWRITE) {
- if (--vp->v_writecount < 0)
- panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
+ if (--vp->v_writecount < 0)
+ panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
}
if (fmode & O_EVTONLY) {
if (--vp->v_kusecount < 0)
panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
- /*
+ /*
* vnode is still busy... if we're the last
* usecount, mark for a future call to VNOP_INACTIVE
* when the iocount finally drops to 0
*/
- if (vp->v_usecount == 0) {
- vp->v_lflag |= VL_NEEDINACTIVE;
+ if (vp->v_usecount == 0) {
+ vp->v_lflag |= VL_NEEDINACTIVE;
vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
}
goto done;
}
vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
- if ( (vp->v_lflag & (VL_TERMINATE | VL_DEAD)) || dont_reenter) {
- /*
+ if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
+ /*
* vnode is being cleaned, or
* we've requested that we don't reenter
- * the filesystem on this release... in
- * this case, we'll mark the vnode aged
- * if it's been marked for termination
+ * the filesystem on this release...in
+ * the latter case, we'll mark the vnode aged
*/
- if (dont_reenter) {
- if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) )
- vp->v_lflag |= VL_NEEDINACTIVE;
- vp->v_flag |= VAGE;
+ if (dont_reenter) {
+ if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) ) {
+ vp->v_lflag |= VL_NEEDINACTIVE;
+
+ if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
+ vnode_async_list_add(vp);
+ goto done;
+ }
+ }
+ vp->v_flag |= VAGE;
}
- vnode_list_add(vp);
+ vnode_list_add(vp);
goto done;
}
*/
#if DIAGNOSTIC
int busyprt = 0; /* print out busy vnodes */
-#if 0
-struct ctldebug debug1 = { "busyprt", &busyprt };
-#endif /* 0 */
#endif
int
vnode_lock_spin(vp);
- if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
+ // If vnode is already terminating, wait for it...
+ while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
+ vp->v_lflag |= VL_TERMWANT;
+ msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
+ }
+
+ if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
vnode_unlock(vp);
mount_lock(mp);
continue;
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
+ vnode_abort_advlocks(vp);
vnode_reclaim_internal(vp, 1, 1, 0);
vnode_dropiocount(vp);
vnode_list_add(vp);
vp->v_lflag |= VL_TERMINATE;
- /*
- * remove the vnode from any mount list
- * it might be on...
- */
- insmntque(vp, (struct mount *)0);
-
#if NAMEDSTREAMS
is_namedstream = vnode_isnamedstream(vp);
#endif
else
#endif
{
- VNOP_FSYNC(vp, MNT_WAIT, ctx);
- buf_invalidateblks(vp, BUF_WRITE_DATA | BUF_INVALIDATE_LOCKED, 0, 0);
+ VNOP_FSYNC(vp, MNT_WAIT, ctx);
+
+ /*
+ * If the vnode is still in use (by the journal for
+ * example) we don't want to invalidate locked buffers
+ * here. In that case, either the journal will tidy them
+ * up, or we will deal with it when the usecount is
+ * finally released in vnode_rele_internal.
+ */
+ buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0);
}
if (UBCINFOEXISTS(vp))
/*
/* Delete the shadow stream file before we reclaim its vnode */
if (vnode_isshadow(vp)) {
- vnode_relenamedstream(pvp, vp, ctx);
+ vnode_relenamedstream(pvp, vp);
}
/*
* Destroy ubc named reference
* cluster_release is done on this path
* along with dropping the reference on the ucred
+ * (and in the case of forced unmount of an mmap-ed file,
+ * the ubc reference on the vnode is dropped here too).
*/
ubc_destroy_named(vp);
vnode_lock(vp);
+ /*
+ * Remove the vnode from any mount list it might be on. It is not
+ * safe to do this any earlier because unmount needs to wait for
+ * any vnodes to terminate and it cannot do that if it cannot find
+ * them.
+ */
+ insmntque(vp, (struct mount *)0);
+
vp->v_mount = dead_mountp;
vp->v_op = dead_vnodeop_p;
vp->v_tag = VT_NON;
vp->v_data = NULL;
vp->v_lflag |= VL_DEAD;
+ vp->v_flag &= ~VISDIRTY;
if (already_terminating == 0) {
vp->v_lflag &= ~VL_TERMINATE;
SPECHASH_LOCK();
break;
}
- vnode_reclaim_internal(vq, 0, 1, 0);
- vnode_put(vq);
+ vnode_lock(vq);
+ if (!(vq->v_lflag & VL_TERMINATE)) {
+ vnode_reclaim_internal(vq, 1, 1, 0);
+ }
+ vnode_put_locked(vq);
+ vnode_unlock(vq);
SPECHASH_LOCK();
break;
}
}
SPECHASH_UNLOCK();
}
- vnode_reclaim_internal(vp, 0, 0, REVOKEALL);
+ vnode_lock(vp);
+ if (vp->v_lflag & VL_TERMINATE) {
+ vnode_unlock(vp);
+ return (ENOENT);
+ }
+ vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
+ vnode_unlock(vp);
return (0);
}
*/
extern unsigned int vfs_nummntops;
+/*
+ * The VFS_NUMMNTOPS shouldn't be at name[1] since
+ * it is a VFS generic variable. Since we no longer support
+ * VT_UFS, we reserve its value to support this sysctl node.
+ *
+ * It should have been:
+ * name[0]: VFS_GENERIC
+ * name[1]: VFS_NUMMNTOPS
+ */
+SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
+ CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &vfs_nummntops, 0, "");
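+/* exported to userspace as "vfs.nummntops" */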
+
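+/*
+ * Legacy VFS sysctl entry point. The old selector-based handling now
+ * lives in vfs_sysctl_node() below, so this stub simply fails.
+ */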
int
-vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
- user_addr_t newp, size_t newlen, proc_t p)
+vfs_sysctl(int *name __unused, u_int namelen __unused,
+ user_addr_t oldp __unused, size_t *oldlenp __unused,
+ user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);
+
+int
+vfs_sysctl(int *name __unused, u_int namelen __unused,
+ user_addr_t oldp __unused, size_t *oldlenp __unused,
+ user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
{
- struct vfstable *vfsp;
- int *username;
- u_int usernamelen;
- int error;
- struct vfsconf vfsc;
+ return (EINVAL);
+}
- /* All non VFS_GENERIC and in VFS_GENERIC,
- * VFS_MAXTYPENUM, VFS_CONF, VFS_SET_PACKAGE_EXTS
- * needs to have root priv to have modifiers.
- * For rest the userland_sysctl(CTLFLAG_ANYBODY) would cover.
- */
- if ((newp != USER_ADDR_NULL) && ((name[0] != VFS_GENERIC) ||
- ((name[1] == VFS_MAXTYPENUM) ||
- (name[1] == VFS_CONF) ||
- (name[1] == VFS_SET_PACKAGE_EXTS)))
- && (error = suser(kauth_cred_get(), &p->p_acflag))) {
- return(error);
- }
- /*
- * The VFS_NUMMNTOPS shouldn't be at name[0] since
- * is a VFS generic variable. So now we must check
- * namelen so we don't end up covering any UFS
- * variables (sinc UFS vfc_typenum is 1).
- *
- * It should have been:
- * name[0]: VFS_GENERIC
- * name[1]: VFS_NUMMNTOPS
- */
- if (namelen == 1 && name[0] == VFS_NUMMNTOPS) {
- return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops));
- }
- /* all sysctl names at this level are at least name and field */
- if (namelen < 2)
- return (EISDIR); /* overloaded */
- if (name[0] != VFS_GENERIC) {
+//
+// The following code disallows specific sysctl's that came through
+// the direct sysctl interface (vfs_sysctl_node) instead of the newer
+// sysctl_vfs_ctlbyfsid() interface. We cannot allow these selectors
+// through vfs_sysctl_node() because it passes the user's oldp pointer
+// directly to the file system which (for these selectors) casts it
+// back to a struct sysctl_req and then proceeds to use SYSCTL_IN()
+// which jumps through an arbitrary function pointer. When called
+// through the sysctl_vfs_ctlbyfsid() interface this does not happen
+// and so it's safe.
+//
+// Unfortunately we have to pull in definitions from AFP and SMB and
+// perform explicit name checks on the file system to determine if
+// these selectors are being used.
+//
+
+#define AFPFS_VFS_CTL_GETID 0x00020001
+#define AFPFS_VFS_CTL_NETCHANGE 0x00020002
+#define AFPFS_VFS_CTL_VOLCHANGE 0x00020003
- mount_list_lock();
- for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
- if (vfsp->vfc_typenum == name[0]) {
- vfsp->vfc_refcount++;
- break;
- }
- mount_list_unlock();
+#define SMBFS_SYSCTL_REMOUNT 1
+#define SMBFS_SYSCTL_REMOUNT_INFO 2
+#define SMBFS_SYSCTL_GET_SERVER_SHARE 3
- if (vfsp == NULL)
- return (ENOTSUP);
- /* XXX current context proxy for proc p? */
- error = ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
- oldp, oldlenp, newp, newlen,
- vfs_context_current()));
+static int
+is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
+{
+ switch(selector_name) {
+ case VFS_CTL_QUERY:
+ case VFS_CTL_TIMEO:
+ case VFS_CTL_NOLOCKS:
+ case VFS_CTL_NSTATUS:
+ case VFS_CTL_SADDR:
+ case VFS_CTL_DISC:
+ case VFS_CTL_SERVERINFO:
+ return 1;
+ break;
- mount_list_lock();
- vfsp->vfc_refcount--;
- mount_list_unlock();
- return error;
+ default:
+ break;
}
- switch (name[1]) {
- case VFS_MAXTYPENUM:
- return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
- case VFS_CONF:
- if (namelen < 3)
- return (ENOTDIR); /* overloaded */
- mount_list_lock();
- for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
- if (vfsp->vfc_typenum == name[2])
+ // the more complicated check for some of SMB's special values
+ if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
+ switch(selector_name) {
+ case SMBFS_SYSCTL_REMOUNT:
+ case SMBFS_SYSCTL_REMOUNT_INFO:
+ case SMBFS_SYSCTL_GET_SERVER_SHARE:
+ return 1;
+ }
+ } else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
+ switch(selector_name) {
+ case AFPFS_VFS_CTL_GETID:
+ case AFPFS_VFS_CTL_NETCHANGE:
+ case AFPFS_VFS_CTL_VOLCHANGE:
+ return 1;
break;
-
- if (vfsp == NULL) {
- mount_list_unlock();
- return (ENOTSUP);
}
-
- vfsc.vfc_reserved1 = 0;
- bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
- vfsc.vfc_typenum = vfsp->vfc_typenum;
- vfsc.vfc_refcount = vfsp->vfc_refcount;
- vfsc.vfc_flags = vfsp->vfc_flags;
- vfsc.vfc_reserved2 = 0;
- vfsc.vfc_reserved3 = 0;
-
- mount_list_unlock();
- return (sysctl_rdstruct(oldp, oldlenp, newp, &vfsc,
- sizeof(struct vfsconf)));
-
- case VFS_SET_PACKAGE_EXTS:
- return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
}
- /*
- * We need to get back into the general MIB, so we need to re-prepend
- * CTL_VFS to our name and try userland_sysctl().
- */
- usernamelen = namelen + 1;
- MALLOC(username, int *, usernamelen * sizeof(*username),
- M_TEMP, M_WAITOK);
- bcopy(name, username + 1, namelen * sizeof(*name));
- username[0] = CTL_VFS;
- error = userland_sysctl(p, username, usernamelen, oldp,
- oldlenp, newp, newlen, oldlenp);
- FREE(username, M_TEMP);
- return (error);
+
+ //
+ // If we get here we passed all the checks so the selector is ok
+ //
+ return 0;
}
-/*
- * Dump vnode list (via sysctl) - defunct
- * use "pstat" instead
- */
-/* ARGSUSED */
-int
-sysctl_vnode
-(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
+
+int vfs_sysctl_node SYSCTL_HANDLER_ARGS
{
- return(EINVAL);
-}
+ int *name, namelen;
+ struct vfstable *vfsp;
+ int error;
+ int fstypenum;
+
+ fstypenum = oidp->oid_number;
+ name = arg1;
+ namelen = arg2;
+
+ /* all sysctl names at this level should have at least one name slot for the FS */
+ if (namelen < 1)
+ return (EISDIR); /* overloaded */
+
+ mount_list_lock();
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (vfsp->vfc_typenum == fstypenum) {
+ vfsp->vfc_refcount++;
+ break;
+ }
+ mount_list_unlock();
+
+ if (vfsp == NULL) {
+ return (ENOTSUP);
+ }
+
+ if (is_bad_sysctl_name(vfsp, name[0])) {
+ printf("vfs: bad selector 0x%.8x for old-style sysctl(). use the sysctl-by-fsid interface instead\n", name[0]);
+ return EPERM;
+ }
+
+ error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen, req->oldptr, &req->oldlen, req->newptr, req->newlen, vfs_context_current());
-SYSCTL_PROC(_kern, KERN_VNODE, vnode,
- CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
- 0, 0, sysctl_vnode, "S,", "");
+ mount_list_lock();
+ vfsp->vfc_refcount--;
+ mount_list_unlock();
+ return error;
+}
/*
* Check to see if a filesystem is mounted on a block device.
return (error);
}
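+
+/* Tallies accumulated by unmount_callback() for vfs_unmountall(). */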
+struct unmount_info {
+ int u_errs; // Total failed unmounts
+ int u_busy; // EBUSY failed unmounts
+};
+
+static int
+unmount_callback(mount_t mp, void *arg)
+{
+ int error;
+ char *mntname;
+ struct unmount_info *uip = arg;
+
+ mount_ref(mp, 0);
+ mount_iterdrop(mp); // avoid vfs_iterate deadlock in dounmount()
+
+ MALLOC_ZONE(mntname, void *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ if (mntname)
+ strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
+
+ error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
+ if (error) {
+ uip->u_errs++;
+ printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error);
+ if (error == EBUSY)
+ uip->u_busy++;
+ }
+ if (mntname)
+ FREE_ZONE(mntname, MAXPATHLEN, M_NAMEI);
+
+ return (VFS_RETURNED);
+}
+
/*
* Unmount all filesystems. The list is traversed in reverse order
* of mounting to avoid dependencies.
+ * Busy mounts are retried.
*/
__private_extern__ void
vfs_unmountall(void)
{
- struct mount *mp;
- int error;
+ int mounts, sec = 1;
+ struct unmount_info ui;
- /*
- * Since this only runs when rebooting, it is not interlocked.
- */
- mount_list_lock();
- while(!TAILQ_EMPTY(&mountlist)) {
- mp = TAILQ_LAST(&mountlist, mntlist);
- mount_list_unlock();
- error = dounmount(mp, MNT_FORCE, 0, vfs_context_current());
- if ((error != 0) && (error != EBUSY)) {
- printf("unmount of %s failed (", mp->mnt_vfsstat.f_mntonname);
- printf("%d)\n", error);
- mount_list_lock();
- TAILQ_REMOVE(&mountlist, mp, mnt_list);
- continue;
- } else if (error == EBUSY) {
- /* If EBUSY is returned, the unmount was already in progress */
- printf("unmount of %p failed (", mp);
- printf("BUSY)\n");
- }
- mount_list_lock();
+retry:
+ ui.u_errs = ui.u_busy = 0;
+ vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
+ mounts = mount_getvfscnt();
+ if (mounts == 0)
+ return;
+
+ if (ui.u_busy > 0) { // Busy mounts - wait & retry
+ tsleep(&nummounts, PVFS, "busy mount", sec * hz);
+ sec *= 2;
+ if (sec <= 32)
+ goto retry;
+ printf("Unmounting timed out\n");
+ } else if (ui.u_errs < mounts) {
+ // If the vfs_iterate missed mounts in progress - wait a bit
+ tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
}
- mount_list_unlock();
}
-
/*
* This routine is called from vnode_pager_deallocate out of the VM
* The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
vnode_lock_spin(vp);
vp->v_lflag &= ~VNAMED_UBC;
+ if (vp->v_usecount != 0) {
+ /*
+ * At the eleventh hour, just before the ubcinfo is
+ * destroyed, ensure the ubc-specific v_usecount
+ * reference has gone. We use v_usecount != 0 as a hint;
+ * ubc_unmap() does nothing if there's no mapping.
+ *
+ * This case is caused by coming here via forced unmount,
+ * versus the usual vm_object_deallocate() path.
+ * In the forced unmount case, ubc_destroy_named()
+ * releases the pager before memory_object_last_unmap()
+ * can be called.
+ */
+ vnode_unlock(vp);
+ ubc_unmap(vp);
+ vnode_lock_spin(vp);
+ }
uip = vp->v_ubcinfo;
vp->v_ubcinfo = UBC_INFO_NULL;
u_int32_t rootunit = (u_int32_t)-1;
+#if CONFIG_IOSCHED
+extern int lowpri_throttle_enabled;
+extern int iosched_enabled;
+#endif
+
errno_t
vfs_init_io_attributes(vnode_t devvp, mount_t mp)
{
off_t readsegsize = 0;
off_t writesegsize = 0;
off_t alignment = 0;
- off_t ioqueue_depth = 0;
+ u_int32_t ioqueue_depth = 0;
u_int32_t blksize;
u_int64_t temp;
u_int32_t features;
vfs_context_t ctx = vfs_context_current();
+ dk_corestorage_info_t cs_info;
+ boolean_t cs_present = FALSE;
int isssd = 0;
int isvirtual = 0;
* and if those advertised constraints result in a smaller
* limit for a given I/O
*/
- mp->mnt_maxreadcnt = MAX_UPL_SIZE * PAGE_SIZE;
- mp->mnt_maxwritecnt = MAX_UPL_SIZE * PAGE_SIZE;
+ mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
+ mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;
if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
if (isvirtual)
if (features & DK_FEATURE_FORCE_UNIT_ACCESS)
mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
- if (features & DK_FEATURE_UNMAP)
- mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;
+
+ if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0)
+ cs_present = TRUE;
+
+ if (features & DK_FEATURE_UNMAP) {
+ mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;
+
+ if (cs_present == TRUE)
+ mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
+ }
+ if (cs_present == TRUE) {
+ /*
+ * for now we'll use the following test as a proxy for
+ * the underlying drive being FUSION in nature
+ */
+ if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA))
+ mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
+ }
+
+#if CONFIG_IOSCHED
+ if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
+ mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
+ throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0);
+ }
+#endif /* CONFIG_IOSCHED */
return (error);
}
}
void
-vfs_event_signal(__unused fsid_t *fsid, u_int32_t event, __unused intptr_t data)
-{
+vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
+{
+ if (event == VQ_DEAD || event == VQ_NOTRESP) {
+ struct mount *mp = vfs_getvfs(fsid);
+ if (mp) {
+ mount_lock_spin(mp);
+ if (data)
+ mp->mnt_kern_flag &= ~MNT_LNOTRESP; // Now responding
+ else
+ mp->mnt_kern_flag |= MNT_LNOTRESP; // Not responding
+ mount_unlock(mp);
+ }
+ }
+
lck_mtx_lock(fs_klist_lock);
KNOTE(&fs_klist, event);
lck_mtx_unlock(fs_klist_lock);
return (0);
}
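+
+/*
+ * vfs.generic.conf handler: copy out the struct vfsconf for the
+ * filesystem type number given as the last name component.
+ */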
+static int
+sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
+{
+ int *name, namelen;
+ struct vfstable *vfsp;
+ struct vfsconf vfsc;
+
+ (void)oidp;
+ name = arg1;
+ namelen = arg2;
+
+ if (namelen < 1) {
+ return (EISDIR);
+ } else if (namelen > 1) {
+ return (ENOTDIR);
+ }
+
+ mount_list_lock();
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (vfsp->vfc_typenum == name[0])
+ break;
+
+ if (vfsp == NULL) {
+ mount_list_unlock();
+ return (ENOTSUP);
+ }
+
+ vfsc.vfc_reserved1 = 0;
+ bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
+ vfsc.vfc_typenum = vfsp->vfc_typenum;
+ vfsc.vfc_refcount = vfsp->vfc_refcount;
+ vfsc.vfc_flags = vfsp->vfc_flags;
+ vfsc.vfc_reserved2 = 0;
+ vfsc.vfc_reserved3 = 0;
+
+ mount_list_unlock();
+ return (SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf)));
+}
+
/* the vfs.generic. branch. */
SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
/* retrieve a list of mounted filesystem fsid_t */
-SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD | CTLFLAG_LOCKED,
+SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
/* perform operations on filesystem via fsid_t */
SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
sysctl_vfs_ctlbyfsid, "ctlbyfsid");
SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
-
-
+SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
+ CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &maxvfstypenum, 0, "");
+SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout, 0, "");
+SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
+ CTLFLAG_RD | CTLFLAG_LOCKED,
+ sysctl_vfs_generic_conf, "");
+
+/*
+ * Print vnode state.
+ */
+void
+vn_print_state(struct vnode *vp, const char *fmt, ...)
+{
+ va_list ap;
+ char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)";
+ char fs_name[MFSNAMELEN];
+
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str);
+ printf("tag %d, type %d\n", vp->v_tag, vp->v_type);
+ /* Counts .. */
+ printf(" iocount %d, usecount %d, kusecount %d references %d\n",
+ vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references);
+ printf(" writecount %d, numoutput %d\n", vp->v_writecount,
+ vp->v_numoutput);
+ /* Flags */
+ printf(" flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag,
+ vp->v_lflag, vp->v_listflag);
+
+ if (vp->v_mount == NULL || vp->v_mount == dead_mountp) {
+ strlcpy(fs_name, "deadfs", MFSNAMELEN);
+ } else {
+ vfs_name(vp->v_mount, fs_name);
+ }
+
+ printf(" v_data 0x%0llx %s\n",
+ (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0),
+ perm_str);
+ printf(" v_mount 0x%0llx %s vfs_name %s\n",
+ (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0),
+ perm_str, fs_name);
+}
+
long num_reusedvnodes = 0;
+
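+/*
+ * Pull a vnode off its free list and try to reclaim it. Returns NULLVP
+ * if the race for the vnode was lost or the work was deferred to the
+ * async thread; with want_vp set, returns the reclaimed vnode.
+ * Called with the vnode list lock held; drops it.
+ */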
+static vnode_t
+process_vp(vnode_t vp, int want_vp, int *deferred)
+{
+ unsigned int vpid;
+
+ *deferred = 0;
+
+ vpid = vp->v_id;
+
+ vnode_list_remove_locked(vp);
+
+ vnode_list_unlock();
+
+ vnode_lock_spin(vp);
+
+ /*
+ * We could wait for the vnode_lock after removing the vp from the freelist
+ * and the vid is bumped only at the very end of reclaim. So it is possible
+ * that we are looking at a vnode that is being terminated. If so skip it.
+ */
+ if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
+ VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
+ /*
+ * we lost the race between dropping the list lock
+ * and picking up the vnode_lock... someone else
+ * used this vnode and it is now in a new state
+ */
+ vnode_unlock(vp);
+
+ return (NULLVP);
+ }
+ if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
+ /*
+ * we did a vnode_rele_ext that asked for
+ * us not to reenter the filesystem during
+ * the release even though VL_NEEDINACTIVE was
+ * set... we'll do it here by doing a
+ * vnode_get/vnode_put
+ *
+ * pick up an iocount so that we can call
+ * vnode_put and drive the VNOP_INACTIVE...
+ * vnode_put will either leave us off
+ * the freelist if a new ref comes in,
+ * or put us back on the end of the freelist
+ * or recycle us if we were marked for termination...
+ * so we'll just go grab a new candidate
+ */
+ vp->v_iocount++;
+#ifdef JOE_DEBUG
+ record_vp(vp, 1);
+#endif
+ vnode_put_locked(vp);
+ vnode_unlock(vp);
+
+ return (NULLVP);
+ }
+ /*
+ * Checks for anyone racing us for recycle
+ */
+ if (vp->v_type != VBAD) {
+ if (want_vp && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
+ vnode_async_list_add(vp);
+ vnode_unlock(vp);
+
+ *deferred = 1;
+
+ return (NULLVP);
+ }
+ if (vp->v_lflag & VL_DEAD)
+ panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
+
+ vnode_lock_convert(vp);
+ (void)vnode_reclaim_internal(vp, 1, want_vp, 0);
+
+ if (want_vp) {
+ if ((VONLIST(vp)))
+ panic("new_vnode(%p): vp on list", vp);
+ if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
+ (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
+ panic("new_vnode(%p): free vnode still referenced", vp);
+ if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
+ panic("new_vnode(%p): vnode seems to be on mount list", vp);
+ if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
+ panic("new_vnode(%p): vnode still hooked into the name cache", vp);
+ } else {
+ vnode_unlock(vp);
+ vp = NULLVP;
+ }
+ }
+ return (vp);
+}
+
+
+
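+/*
+ * Body of the async-work thread: sleep until vnodes are queued on the
+ * vnode_async_work_list, then reclaim them via process_vp() with
+ * want_vp == 0 so the reclaimed vnodes are not handed out.
+ */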
+static void
+async_work_continue(void)
+{
+ struct async_work_lst *q;
+ int deferred;
+ vnode_t vp;
+
+ q = &vnode_async_work_list;
+
+ for (;;) {
+
+ vnode_list_lock();
+
+ if ( TAILQ_EMPTY(q) ) {
+ assert_wait(q, (THREAD_UNINT));
+
+ vnode_list_unlock();
+
+ thread_block((thread_continue_t)async_work_continue);
+
+ continue;
+ }
+ async_work_handled++;
+
+ vp = TAILQ_FIRST(q);
+
+ vp = process_vp(vp, 0, &deferred);
+
+ if (vp != NULLVP)
+ panic("found VBAD vp (%p) on async queue", vp);
+ }
+}
+
+
static int
new_vnode(vnode_t *vpp)
{
vnode_t vp;
- int retries = 0; /* retry incase of tablefull */
+ uint32_t retries = 0, max_retries = 100; /* retry in case of tablefull */
int force_alloc = 0, walk_count = 0;
- unsigned int vpid;
- struct timespec ts;
+ boolean_t need_reliable_vp = FALSE;
+ int deferred;
+ struct timeval initial_tv;
struct timeval current_tv;
-#ifndef __LP64__
- struct unsafe_fsnode *l_unsafefs = 0;
-#endif /* __LP64__ */
proc_t curproc = current_proc();
+ initial_tv.tv_sec = 0;
retry:
- microuptime(¤t_tv);
-
vp = NULLVP;
vnode_list_lock();
+ if (need_reliable_vp == TRUE)
+ async_work_timed_out++;
+
if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) {
+ struct timespec ts;
+
if ( !TAILQ_EMPTY(&vnode_dead_list)) {
/*
* Can always reuse a dead one
vp->v_iocount = 1;
goto done;
}
+ microuptime(¤t_tv);
#define MAX_WALK_COUNT 1000
(current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
- if ( !(vp->v_listflag & VLIST_RAGE))
- panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
+ if ( !(vp->v_listflag & VLIST_RAGE))
+ panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
- // if we're a dependency-capable process, skip vnodes that can
+ // if we're a dependency-capable process, skip vnodes that can
// cause recycling deadlocks. (i.e. this process is diskimages
// helper and the vnode is in a disk image). Querying the
// mnt_kern_flag for the mount's virtual device status
// may not be updated if there are multiple devnode layers
// in between the disk image and the final consumer.
- if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
- (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
- break;
- }
+ if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
+ (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
+ /*
+ * if need_reliable_vp == TRUE, then we've already sent one or more
+ * non-reliable vnodes to the async thread for processing and timed
+ * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
+ * mechanism to first scan for a reliable vnode before forcing
+ * a new vnode to be created
+ */
+ if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE)
+ break;
+ }
- // don't iterate more than MAX_WALK_COUNT vnodes to
- // avoid keeping the vnode list lock held for too long.
- if (walk_count++ > MAX_WALK_COUNT) {
+ // don't iterate more than MAX_WALK_COUNT vnodes to
+ // avoid keeping the vnode list lock held for too long.
+
+ if (walk_count++ > MAX_WALK_COUNT) {
vp = NULL;
- break;
- }
+ break;
+ }
}
-
}
if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
// may not be updated if there are multiple devnode layers
// in between the disk image and the final consumer.
- if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
- (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
- break;
- }
+ if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
+ (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
+ /*
+ * if need_reliable_vp == TRUE, then we've already sent one or more
+ * non-reliable vnodes to the async thread for processing and timed
+ * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
+ * mechanism to first scan for a reliable vnode before forcing
+ * a new vnode to be created
+ */
+ if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE)
+ break;
+ }
- // don't iterate more than MAX_WALK_COUNT vnodes to
- // avoid keeping the vnode list lock held for too long.
- if (walk_count++ > MAX_WALK_COUNT) {
- vp = NULL;
- break;
- }
- }
+ // don't iterate more than MAX_WALK_COUNT vnodes to
+ // avoid keeping the vnode list lock held for too long.
+ if (walk_count++ > MAX_WALK_COUNT) {
+ vp = NULL;
+ break;
+ }
+ }
}
//
// the allocation.
//
if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
- force_alloc = 1;
- vnode_list_unlock();
- goto retry;
+ force_alloc = 1;
+ vnode_list_unlock();
+ goto retry;
}
if (vp == NULL) {
* we've reached the system imposed maximum number of vnodes
* but there isn't a single one available
* wait a bit and then retry... if we can't get a vnode
- * after 100 retries, than log a complaint
+ * after our target number of retries, then log a complaint
*/
- if (++retries <= 100) {
+ if (++retries <= max_retries) {
vnode_list_unlock();
delay_for_interval(1, 1000 * 1000);
goto retry;
vnode_list_unlock();
tablefull("vnode");
log(LOG_EMERG, "%d desired, %d numvnodes, "
- "%d free, %d dead, %d rage\n",
- desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
-#if CONFIG_EMBEDDED
+ "%d free, %d dead, %d async, %d rage\n",
+ desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);
+#if CONFIG_JETSAM
+
+#if DEVELOPMENT || DEBUG
+ if (bootarg_no_vnode_jetsam)
+ panic("vnode table is full\n");
+#endif /* DEVELOPMENT || DEBUG */
+
/*
* Running out of vnodes tends to make a system unusable. Start killing
* processes that jetsam knows are killable.
*/
- if (jetsam_kill_top_proc(TRUE, kJetsamFlagsKilledVnodes) < 0) {
+ if (memorystatus_kill_on_vnode_limit() == FALSE) {
/*
* If jetsam can't find any more processes to kill and there
* still aren't any free vnodes, panic. Hopefully we'll get a
panic("vnode table is full\n");
}
- delay_for_interval(1, 1000 * 1000);
+ /*
+ * Now that we've killed someone, wait a bit and continue looking
+ * (with fewer retries before trying another kill).
+ */
+ delay_for_interval(3, 1000 * 1000);
+ retries = 0;
+ max_retries = 10;
goto retry;
#endif
return (ENFILE);
}
steal_this_vp:
- vpid = vp->v_id;
+ if ((vp = process_vp(vp, 1, &deferred)) == NULLVP) {
+ if (deferred) {
+ int elapsed_msecs;
+ struct timeval elapsed_tv;
- vnode_list_remove_locked(vp);
+ if (initial_tv.tv_sec == 0)
+ microuptime(&initial_tv);
- vnode_list_unlock();
+ vnode_list_lock();
- vnode_lock_spin(vp);
+ dead_vnode_waited++;
+ dead_vnode_wanted++;
- /*
- * We could wait for the vnode_lock after removing the vp from the freelist
- * and the vid is bumped only at the very end of reclaim. So it is possible
- * that we are looking at a vnode that is being terminated. If so skip it.
- */
- if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
- VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
- /*
- * we lost the race between dropping the list lock
- * and picking up the vnode_lock... someone else
- * used this vnode and it is now in a new state
- * so we need to go back and try again
- */
- vnode_unlock(vp);
- goto retry;
- }
- if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
- /*
- * we did a vnode_rele_ext that asked for
- * us not to reenter the filesystem during
- * the release even though VL_NEEDINACTIVE was
- * set... we'll do it here by doing a
- * vnode_get/vnode_put
- *
- * pick up an iocount so that we can call
- * vnode_put and drive the VNOP_INACTIVE...
- * vnode_put will either leave us off
- * the freelist if a new ref comes in,
- * or put us back on the end of the freelist
- * or recycle us if we were marked for termination...
- * so we'll just go grab a new candidate
- */
- vp->v_iocount++;
-#ifdef JOE_DEBUG
- record_vp(vp, 1);
-#endif
- vnode_put_locked(vp);
- vnode_unlock(vp);
- goto retry;
- }
- OSAddAtomicLong(1, &num_reusedvnodes);
+ /*
+ * note that we're only going to explicitly wait 10ms
+ * for a dead vnode to become available, since even if one
+ * isn't available, a reliable vnode might now be available
+ * at the head of the VRAGE or free lists... if so, we
+ * can satisfy the new_vnode request with less latency than waiting
+ * for the full 100ms duration we're ultimately willing to tolerate
+ */
+ assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);
- /* Checks for anyone racing us for recycle */
- if (vp->v_type != VBAD) {
- if (vp->v_lflag & VL_DEAD)
- panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
- vnode_lock_convert(vp);
- (void)vnode_reclaim_internal(vp, 1, 1, 0);
+ vnode_list_unlock();
- if ((VONLIST(vp)))
- panic("new_vnode(%p): vp on list", vp);
- if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
- (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
- panic("new_vnode(%p): free vnode still referenced", vp);
- if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
- panic("new_vnode(%p): vnode seems to be on mount list", vp);
- if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
- panic("new_vnode(%p): vnode still hooked into the name cache", vp);
- }
+ thread_block(THREAD_CONTINUE_NULL);
+
+ microuptime(&elapsed_tv);
+
+ timevalsub(&elapsed_tv, &initial_tv);
+ elapsed_msecs = elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000;
-#ifndef __LP64__
- if (vp->v_unsafefs) {
- l_unsafefs = vp->v_unsafefs;
- vp->v_unsafefs = (struct unsafe_fsnode *)NULL;
+ if (elapsed_msecs >= 100) {
+ /*
+ * we've waited long enough... 100ms is
+ * somewhat arbitrary for this case, but the
+ * normal worst case latency used for UI
+ * interaction is 100ms, so I've chosen to
+ * go with that.
+ *
+ * setting need_reliable_vp to TRUE
+ * forces us to find a reliable vnode
+ * that we can process synchronously, or
+ * to create a new one if the scan for
+ * a reliable one hits the scan limit
+ */
+ need_reliable_vp = TRUE;
+ }
+ }
+ goto retry;
}
-#endif /* __LP64__ */
+ OSAddAtomicLong(1, &num_reusedvnodes);
+
#if CONFIG_MACF
/*
vnode_unlock(vp);
-#ifndef __LP64__
- if (l_unsafefs) {
- lck_mtx_destroy(&l_unsafefs->fsnodelock, vnode_lck_grp);
- FREE_ZONE((void *)l_unsafefs, sizeof(struct unsafe_fsnode), M_UNSAFEFS);
- }
-#endif /* __LP64__ */
-
done:
*vpp = vp;
return(retval);
}
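+
+/*
+ * Point a vnode at the dead filesystem ops so that any further VNOPs
+ * on it fail safely.
+ */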
+static inline void
+vn_set_dead(vnode_t vp)
+{
+ vp->v_mount = NULL;
+ vp->v_op = dead_vnodeop_p;
+ vp->v_tag = VT_NON;
+ vp->v_data = NULL;
+ vp->v_type = VBAD;
+ vp->v_lflag |= VL_DEAD;
+}
+
int
vnode_put_locked(vnode_t vp)
{
return(0);
}
+/*
+ * Release any blocked locking requests on the vnode.
+ * Used for forced-unmounts.
+ *
+ * XXX What about network filesystems?
+ */
+static void
+vnode_abort_advlocks(vnode_t vp)
+{
+ if (vp->v_flag & VLOCKLOCAL)
+ lf_abort_advlocks(vp);
+}
static errno_t
* this allows us to keep actively referenced vnodes in the list without having
* to constantly remove and add to the list each time a vnode w/o a usecount is
* referenced which costs us taking and dropping a global lock twice.
+ * However, if the vnode is marked DIRTY, we want to pull it out much earlier
*/
-#define UNAGE_THRESHHOLD 25
+#define UNAGE_THRESHHOLD 25
+#define UNAGE_DIRTYTHRESHHOLD 6
errno_t
vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
int nosusp = vflags & VNODE_NOSUSPEND;
int always = vflags & VNODE_ALWAYS;
int beatdrain = vflags & VNODE_DRAINO;
+ int withvid = vflags & VNODE_WITHID;
for (;;) {
+ int sleepflg = 0;
+
/*
* if it is a dead vnode with deadfs
*/
break;
/*
- * In some situations, we want to get an iocount
- * even if the vnode is draining to prevent deadlock,
- * e.g. if we're in the filesystem, potentially holding
- * resources that could prevent other iocounts from
- * being released.
+ * If this vnode is getting drained, there are some cases where
+ * we can't block or, in case of tty vnodes, want to be
+ * interruptible.
*/
- if (beatdrain && (vp->v_lflag & VL_DRAIN)) {
- break;
+ if (vp->v_lflag & VL_DRAIN) {
+ /*
+ * In some situations, we want to get an iocount
+ * even if the vnode is draining to prevent deadlock,
+ * e.g. if we're in the filesystem, potentially holding
+ * resources that could prevent other iocounts from
+ * being released.
+ */
+ if (beatdrain)
+ break;
+ /*
+ * Don't block if the vnode's mount point is unmounting as
+ * we may be the thread the unmount is itself waiting on.
+ * Only callers who pass in vids (at this point, we've already
+ * handled nosusp and nodead) are expecting error returns
+ * from this function, so we can only return errors for
+ * those. ENODEV is intended to inform callers that the call
+ * failed because an unmount is in progress.
+ */
+ if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount))
+ return (ENODEV);
+
+ if (vnode_istty(vp)) {
+ sleepflg = PCATCH;
+ }
}
vnode_lock_convert(vp);
if (vp->v_lflag & VL_TERMINATE) {
+ int error;
+
vp->v_lflag |= VL_TERMWANT;
- msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
+ error = msleep(&vp->v_lflag, &vp->v_lock,
+ (PVFS | sleepflg), "vnode getiocount", NULL);
+ if (error)
+ return (error);
} else
msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
}
- if (((vflags & VNODE_WITHID) != 0) && vid != vp->v_id) {
+ if (withvid && vid != vp->v_id) {
return(ENOENT);
}
- if (++vp->v_references >= UNAGE_THRESHHOLD) {
+ if (++vp->v_references >= UNAGE_THRESHHOLD ||
+ (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD)) {
vp->v_references = 0;
vnode_list_remove(vp);
}
vnode_unlock(vp);
}
-/* USAGE:
- * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
- * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
- * is obsoleted by this.
- */
-int
-vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
+static int
+vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
+ int init_vnode)
{
int error;
int insert = 1;
+ int existing_vnode;
vnode_t vp;
vnode_t nvp;
vnode_t dvp;
#if CONFIG_TRIGGERS
struct vnode_trigger_param *tinfo = NULL;
#endif
- if (param == NULL)
- return (EINVAL);
+ if (*vpp) {
+ vp = *vpp;
+ *vpp = NULLVP;
+ existing_vnode = 1;
+ } else {
+ existing_vnode = 0;
+ }
-#if CONFIG_TRIGGERS
- if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
- tinfo = (struct vnode_trigger_param *)data;
+ if (init_vnode) {
+ /* Do quick sanity check on the parameters. */
+ if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
+ error = EINVAL;
+ goto error_out;
+ }
- /* Validate trigger vnode input */
- if ((param->vnfs_vtype != VDIR) ||
- (tinfo->vnt_resolve_func == NULL) ||
- (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
- return (EINVAL);
+#if CONFIG_TRIGGERS
+ if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
+ tinfo = (struct vnode_trigger_param *)data;
+
+ /* Validate trigger vnode input */
+ if ((param->vnfs_vtype != VDIR) ||
+ (tinfo->vnt_resolve_func == NULL) ||
+ (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
+ error = EINVAL;
+ goto error_out;
+ }
+ /* Fall through a normal create (params will be the same) */
+ flavor = VNCREATE_FLAVOR;
+ size = VCREATESIZE;
}
- /* Fall through a normal create (params will be the same) */
- flavor = VNCREATE_FLAVOR;
- size = VCREATESIZE;
- }
#endif
- if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE))
- return (EINVAL);
+ if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
+ error = EINVAL;
+ goto error_out;
+ }
+ }
- if ( (error = new_vnode(&vp)) )
- return(error);
+ if (!existing_vnode) {
+ if ((error = new_vnode(&vp)) ) {
+ return (error);
+ }
+ if (!init_vnode) {
+ /* Make it so that it can be released by a vnode_put() */
+ vn_set_dead(vp);
+ *vpp = vp;
+ return (0);
+ }
+ } else {
+ /*
+ * A vnode obtained by vnode_create_empty has been passed to
+ * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
+ * this point, it is set back on any error.
+ *
+ * N.B. vnode locking - We make the same assumptions as the
+ * "unsplit" vnode_create did - i.e. it is safe to update the
+ * vnode's fields without the vnode lock. This vnode has been
+ * out and about with the filesystem and hopefully nothing
+ * was done to the vnode between the vnode_create_empty and
+ * now when it has come in through vnode_initialize.
+ */
+ vp->v_lflag &= ~VL_DEAD;
+ }
dvp = param->vnfs_dvp;
cnp = param->vnfs_cnp;
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
- vp->v_mount = NULL;
- vp->v_op = dead_vnodeop_p;
- vp->v_tag = VT_NON;
- vp->v_data = NULL;
- vp->v_type = VBAD;
- vp->v_lflag |= VL_DEAD;
+ vn_set_dead(vp);
vnode_put(vp);
return(error);
}
+ if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)
+ memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
}
#ifdef JOE_DEBUG
record_vp(vp, 1);
error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
if (error) {
printf("vnode_create: vnode_resolver_create() err %d\n", error);
- vp->v_mount = NULL;
- vp->v_op = dead_vnodeop_p;
- vp->v_tag = VT_NON;
- vp->v_data = NULL;
- vp->v_type = VBAD;
- vp->v_lflag |= VL_DEAD;
+ vn_set_dead(vp);
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
insert = 0;
vnode_unlock(vp);
}
+
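+ /*
+ * Remember tty character devices; vnode_getiocount() uses this
+ * to sleep interruptibly (PCATCH) while such a vnode drains.
+ */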
+ if (VCHR == vp->v_type) {
+ u_int maj = major(vp->v_rdev);
+
+ if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY)
+ vp->v_flag |= VISTTY;
+ }
}
if (vp->v_type == VFIFO) {
*/
insmntque(vp, param->vnfs_mp);
}
-#ifndef __LP64__
- if ((param->vnfs_mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE) == 0) {
- MALLOC_ZONE(vp->v_unsafefs, struct unsafe_fsnode *,
- sizeof(struct unsafe_fsnode), M_UNSAFEFS, M_WAITOK);
- vp->v_unsafefs->fsnode_count = 0;
- vp->v_unsafefs->fsnodeowner = (void *)NULL;
- lck_mtx_init(&vp->v_unsafefs->fsnodelock, vnode_lck_grp, vnode_lck_attr);
- }
-#endif /* __LP64__ */
}
if (dvp && vnode_ref(dvp) == 0) {
vp->v_parent = dvp;
vp->v_flag |= VRAGE;
}
return (0);
+
+error_out:
+ if (existing_vnode) {
+ vnode_put(vp);
+ }
+ return (error);
+}
+
+/* USAGE:
+ * The following API creates a vnode, associates with it all the parameters specified in the
+ * vnode_fsparam structure, and returns a vnode handle with a reference. Device aliasing is
+ * handled here, so checkalias is obsoleted by this.
+ */
+int
+vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
+{
+ *vpp = NULLVP;
+ return (vnode_create_internal(flavor, size, data, vpp, 1));
+}
+
+int
+vnode_create_empty(vnode_t *vpp)
+{
+ *vpp = NULLVP;
+ return (vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL,
+ vpp, 0));
+}
+
+int
+vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
+{
+ if (*vpp == NULLVP) {
+ panic("NULL vnode passed to vnode_initialize");
+ }
+#if DEVELOPMENT || DEBUG
+ /*
+ * We lock to check that vnode is fit for unlocked use in
+ * vnode_create_internal.
+ */
+ vnode_lock_spin(*vpp);
+ VNASSERT(((*vpp)->v_iocount == 1), *vpp,
+ ("vnode_initialize : iocount not 1, is %d", (*vpp)->v_iocount));
+ VNASSERT(((*vpp)->v_usecount == 0), *vpp,
+ ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
+ VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
+ ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
+ (*vpp)->v_lflag));
+ VNASSERT(((*vpp)->v_data == NULL), *vpp,
+ ("vnode_initialize : v_data not NULL"));
+ vnode_unlock(*vpp);
+#endif
+ return (vnode_create_internal(flavor, size, data, vpp, 1));
}
int
int count, actualcount, i;
void * allocmem;
int indx_start, indx_stop, indx_incr;
+ int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
count = mount_getvfscnt();
count += 10;
/* iterate over all the vnodes */
ret = callout(mp, arg);
- mount_iterdrop(mp);
+ /*
+ * Drop the iterref here if the callback didn't do it.
+ * Note: If cb_dropref is set the mp may no longer exist.
+ */
+ if (!cb_dropref)
+ mount_iterdrop(mp);
switch (ret) {
case VFS_RETURNED:
int error;
u_int32_t ndflags = 0;
- if (ctx == NULL) { /* XXX technically an error */
- ctx = vfs_context_current();
+ if (ctx == NULL) {
+ return EINVAL;
}
if (flags & VNODE_LOOKUP_NOFOLLOW)
if (flags & VNODE_LOOKUP_NOCROSSMOUNT)
ndflags |= NOCROSSMOUNT;
- if (flags & VNODE_LOOKUP_DOWHITEOUT)
- ndflags |= DOWHITEOUT;
+
+ if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT)
+ ndflags |= CN_NBMOUNTLOOK;
/* XXX AUDITVNPATH1 needed ? */
NDINIT(&nd, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
if (lflags & VNODE_LOOKUP_NOCROSSMOUNT)
ndflags |= NOCROSSMOUNT;
- if (lflags & VNODE_LOOKUP_DOWHITEOUT)
- ndflags |= DOWHITEOUT;
+ if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT)
+ ndflags |= CN_NBMOUNTLOOK;
+
/* XXX AUDITVNPATH1 needed ? */
NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
CAST_USER_ADDR_T(path), ctx);
return (error);
}
+errno_t
+vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
+{
+ struct vnode_attr va;
+ int error;
+
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_modify_time);
+ error = vnode_getattr(vp, &va, ctx);
+ if (!error)
+ *mtime = va.va_modify_time;
+ return error;
+}
+
+errno_t
+vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
+{
+ struct vnode_attr va;
+ int error;
+
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_flags);
+ error = vnode_getattr(vp, &va, ctx);
+ if (!error)
+ *flags = va.va_flags;
+ return error;
+}
+
/*
* Returns: 0 Success
* vnode_getattr:???
return(vnode_setattr(vp, &va, ctx));
}
+int
+vnode_setdirty(vnode_t vp)
+{
+ vnode_lock_spin(vp);
+ vp->v_flag |= VISDIRTY;
+ vnode_unlock(vp);
+ return 0;
+}
+
+int
+vnode_cleardirty(vnode_t vp)
+{
+ vnode_lock_spin(vp);
+ vp->v_flag &= ~VISDIRTY;
+ vnode_unlock(vp);
+ return 0;
+}
+
+int
+vnode_isdirty(vnode_t vp)
+{
+ int dirty;
+
+ vnode_lock_spin(vp);
+ dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
+ vnode_unlock(vp);
+
+ return dirty;
+}
+
static int
vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
{
/* Only use compound VNOP for compound operation */
if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
*vpp = NULLVP;
- return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, VNOP_COMPOUND_OPEN_DO_CREATE, fmode, statusp, vap, ctx);
+ return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
} else {
return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
}
boolean_t batched;
struct componentname *cnp;
uint32_t defaulted;
+	uint32_t dfflags;	/* Directory file flags */
cnp = &ndp->ni_cnd;
error = 0;
panic("Mode for open, but not trying to open...");
}
+	/*
+	 * Handle inheritance of the SF_RESTRICTED flag from the
+	 * parent directory.
+	 */
+ error = vnode_flags(dvp, &dfflags, ctx);
+ if (error)
+ return error;
+ if (dfflags & SF_RESTRICTED)
+ VATTR_SET(vap, va_flags, SF_RESTRICTED);
+
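+	/*
+	 * Illustration: with the check above, a file or directory created
+	 * inside a directory whose va_flags include SF_RESTRICTED is
+	 * itself created with SF_RESTRICTED set, so the restriction
+	 * propagates to newly created entries.
+	 */
+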
/*
* Create the requested node.
*/
int
vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
{
+#if !CONFIG_MACF
+#pragma unused(cnp)
+#endif
int error = 0;
/*
/* Open of existing case */
kauth_action_t action;
int error = 0;
-
if (cnp->cn_ndp == NULL) {
panic("NULL ndp");
}
action |= KAUTH_VNODE_WRITE_DATA;
}
}
- return (vnode_authorize(vp, NULL, action, ctx));
+ error = vnode_authorize(vp, NULL, action, ctx);
+#if NAMEDSTREAMS
+ if (error == EACCES) {
+		/*
+		 * Shadow files may exist on-disk with a different UID/GID
+		 * than that of the current context. Verify that this file
+		 * is really a shadow file. If it was created successfully,
+		 * then it should be authorized.
+		 */
+		if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
+ error = vnode_verifynamedstream(vp);
+ }
+ }
+#endif
+
+ return error;
}
int
vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
+#if !CONFIG_MACF
+#pragma unused(vap)
+#endif
/* Creation case */
int error;
/***** <MACF> *****/
#if CONFIG_MACF
- error = mac_vnode_check_rename_from(ctx, fdvp, fvp, fcnp);
- if (error)
- goto out;
-#endif
-
-#if CONFIG_MACF
- error = mac_vnode_check_rename_to(ctx,
- tdvp, tvp, fdvp == tdvp, tcnp);
+ error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
if (error)
goto out;
#endif
int
vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
+#if !CONFIG_MACF
+#pragma unused(vap)
+#endif
int error;
if (reserved != NULL) {
int
vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
{
+#if CONFIG_MACF
int error;
-
+#else
+#pragma unused(cnp)
+#endif
if (reserved != NULL) {
panic("Non-NULL reserved argument to vn_authorize_rmdir()");
}
goto out;
}
+	/* Assume that there were DENYs, so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
+ found_deny = TRUE;
+
KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
}
out:
* deny execute, we can synthesize a global right that allows anyone to
* traverse this directory during a pathname lookup without having to
* match the credential associated with this cache of rights.
+ *
+ * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
+ * only if we actually check ACLs, which we don't for root. As
+ * a workaround, the lookup fast path checks for root.
*/
if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
* If the size is being set, make sure it's not a directory.
*/
if (VATTR_IS_ACTIVE(vap, va_data_size)) {
- /* size is meaningless on a directory, don't permit this */
- if (vnode_isdir(vp)) {
- KAUTH_DEBUG("ATTR - ERROR: size change requested on a directory");
- error = EISDIR;
+		/*
+		 * Size is only meaningful on regular files; don't permit a
+		 * size change otherwise (EISDIR for directories, EINVAL for
+		 * any other non-regular vnode).
+		 */
+ if (!vnode_isreg(vp)) {
+ KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
+ error = vnode_isdir(vp) ? EISDIR : EINVAL;
goto out;
}
}
}
void
-vfs_setunmountpreflight(mount_t mp)
+vfs_setcompoundopen(mount_t mp)
{
mount_lock_spin(mp);
- mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
+ mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
mount_unlock(mp);
}
+
void
-vfs_setcompoundopen(mount_t mp)
+vnode_setswapmount(vnode_t vp)
{
- mount_lock_spin(mp);
- mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
- mount_unlock(mp);
+ mount_lock(vp->v_mount);
+ vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
+ mount_unlock(vp->v_mount);
}
+
+int64_t
+vnode_getswappin_avail(vnode_t vp)
+{
+ int64_t max_swappin_avail = 0;
+
+ mount_lock(vp->v_mount);
+ if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED)
+ max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
+ mount_unlock(vp->v_mount);
+
+ return (max_swappin_avail);
+}
+
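+/*
+ * Usage sketch (illustrative; swap_vp and swapfile_size are
+ * hypothetical names): the swap layer can ask how many more bytes it
+ * may pin on the volume backing a swap file.
+ *
+ *     int64_t avail = vnode_getswappin_avail(swap_vp);
+ *     if (avail >= (int64_t)swapfile_size) {
+ *         (safe to pin this swapfile)
+ *     }
+ *
+ * A zero return means the backing mount does not advertise
+ * MNT_IOFLAGS_SWAPPIN_SUPPORTED or has no pinnable space left.
+ */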
+
void
vn_setunionwait(vnode_t vp)
{
vnode_unlock(vp);
}
-/*
- * XXX - get "don't trigger mounts" flag for thread; used by autofs.
- */
-extern int thread_notrigger(void);
-
-int
-thread_notrigger(void)
-{
- struct uthread *uth = (struct uthread *)get_bsdthread_info(current_thread());
- return (uth->uu_notrigger);
-}
-
/*
* Removes orphaned apple double files during a rmdir
* Works by:
int eofflag, siz = UIO_BUFF_SIZE, nentries = 0;
int open_flag = 0, full_erase_flag = 0;
char uio_buf[ UIO_SIZEOF(1) ];
- char *rbuf = NULL, *cpos, *cend;
- struct nameidata nd_temp;
+ char *rbuf = NULL;
+ void *dir_pos;
+ void *dir_end;
struct dirent *dp;
errno_t error;
if (error == EBUSY)
*restart_flag = 1;
if (error != 0)
- goto outsc;
+ return (error);
/*
* set up UIO
/*
* Iterate through directory
*/
- cpos = rbuf;
- cend = rbuf + siz;
- dp = (struct dirent*) cpos;
+ dir_pos = (void*) rbuf;
+ dir_end = (void*) (rbuf + siz);
+ dp = (struct dirent*) (dir_pos);
- if (cpos == cend)
+ if (dir_pos == dir_end)
eofflag = 1;
- while ((cpos < cend)) {
+ while (dir_pos < dir_end) {
/*
* Check for . and .. as well as directories
*/
goto outsc;
}
}
- cpos += dp->d_reclen;
- dp = (struct dirent*)cpos;
+ dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
+ dp = (struct dirent*)dir_pos;
}
/*
/*
* Iterate through directory
*/
- cpos = rbuf;
- cend = rbuf + siz;
- dp = (struct dirent*) cpos;
+ dir_pos = (void*) rbuf;
+ dir_end = (void*) (rbuf + siz);
+ dp = (struct dirent*) dir_pos;
- if (cpos == cend)
+ if (dir_pos == dir_end)
eofflag = 1;
- while ((cpos < cend)) {
+ while (dir_pos < dir_end) {
/*
* Check for . and .. as well as directories
*/
(dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
) {
- NDINIT(&nd_temp, DELETE, OP_UNLINK, USEDVP,
- UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name),
- ctx);
- nd_temp.ni_dvp = vp;
- error = unlink1(ctx, &nd_temp, 0);
+ error = unlink1(ctx, vp,
+ CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE,
+ VNODE_REMOVE_SKIP_NAMESPACE_EVENT |
+ VNODE_REMOVE_NO_AUDIT_PATH);
if (error && error != ENOENT) {
goto outsc;
}
}
- cpos += dp->d_reclen;
- dp = (struct dirent*)cpos;
+ dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
+ dp = (struct dirent*)dir_pos;
}
/*
if (open_flag)
VNOP_CLOSE(vp, FREAD, ctx);
- uio_free(auio);
+ if (auio)
+ uio_free(auio);
FREE(rbuf, M_TEMP);
vnode_resume(vp);
}
}
+void panic_print_vnodes(void);
+/* Define PANIC_PRINTS_VNODES only when this level of investigation is required. */
+#ifdef PANIC_PRINTS_VNODES
+
+static const char *__vtype(uint16_t vtype)
+{
+ switch (vtype) {
+ case VREG:
+ return "R";
+ case VDIR:
+ return "D";
+ case VBLK:
+ return "B";
+ case VCHR:
+ return "C";
+ case VLNK:
+ return "L";
+ case VSOCK:
+ return "S";
+ case VFIFO:
+ return "F";
+ case VBAD:
+ return "x";
+ case VSTR:
+ return "T";
+ case VCPLX:
+ return "X";
+ default:
+ return "?";
+ }
+}
+
+/*
+ * build a path from the bottom up
+ * NOTE: called from the panic path - no alloc'ing of memory and no locks!
+ */
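+/*
+ * Worked example (illustrative): for a file vnode at "/a/b/c.txt" and a
+ * 16-byte buffer, the recursion fills the buffer from the back:
+ *
+ *     depth 0: leaf "c.txt" skipped, '/' written at str[15]
+ *     depth 1: "b" copied to str[14], '/' at str[13]
+ *     depth 2: "a" copied to str[12], '/' at str[11]
+ *     root:    VROOT with no covered vnode, so return str + len
+ *
+ * The pointer returned addresses "/a/b/" inside the buffer; the caller
+ * provides the trailing NUL.
+ */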
+static char *__vpath(vnode_t vp, char *str, int len, int depth)
+{
+ int vnm_len;
+ const char *src;
+ char *dst;
+
+ if (len <= 0)
+ return str;
+ /* str + len is the start of the string we created */
+ if (!vp->v_name)
+ return str + len;
+
+ /* follow mount vnodes to get the full path */
+ if ((vp->v_flag & VROOT)) {
+ if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
+ return __vpath(vp->v_mount->mnt_vnodecovered,
+ str, len, depth+1);
+ }
+ return str + len;
+ }
+
+ src = vp->v_name;
+ vnm_len = strlen(src);
+ if (vnm_len > len) {
+ /* truncate the name to fit in the string */
+ src += (vnm_len - len);
+ vnm_len = len;
+ }
+
+	/*
+	 * Start from the back of the buffer and copy characters only (no
+	 * NULs are written here). At depth 0 the starting vnode's own
+	 * (leaf) name is deliberately skipped, so only parent components
+	 * are emitted.
+	 */
+ if (depth > 0) {
+ dst = str + len - vnm_len;
+ memcpy(dst, src, vnm_len);
+ len -= vnm_len;
+ } else {
+ dst = str + len;
+ }
+
+ if (vp->v_parent && len > 1) {
+ /* follow parents up the chain */
+ len--;
+ *(dst-1) = '/';
+ return __vpath(vp->v_parent, str, len, depth + 1);
+ }
+
+ return dst;
+}
+
+extern int kdb_printf(const char *format, ...) __printflike(1,2);
+
+#define SANE_VNODE_PRINT_LIMIT 5000
+void panic_print_vnodes(void)
+{
+ mount_t mnt;
+ vnode_t vp;
+ int nvnodes = 0;
+ const char *type;
+ char *nm;
+ char vname[257];
+
+ kdb_printf("\n***** VNODES *****\n"
+ "TYPE UREF ICNT PATH\n");
+
+ /* NULL-terminate the path name */
+ vname[sizeof(vname)-1] = '\0';
+
+ /*
+ * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
+ */
+ TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
+ TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
+ if (++nvnodes > SANE_VNODE_PRINT_LIMIT)
+ return;
+ type = __vtype(vp->v_type);
+ nm = __vpath(vp, vname, sizeof(vname)-1, 0);
+ kdb_printf("%s %0d %0d %s\n",
+ type, vp->v_usecount, vp->v_iocount, nm);
+ }
+ }
+}
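+
+/*
+ * The output looks roughly like this (rows hypothetical; __vpath skips
+ * the leaf name, so PATH shows the parent chain):
+ *
+ *     ***** VNODES *****
+ *     TYPE UREF ICNT PATH
+ *     D 2 1 /private/var/
+ *     R 1 0 /usr/lib/
+ */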
+
+#else /* !PANIC_PRINTS_VNODES */
+void panic_print_vnodes(void)
+{
+ return;
+}
+#endif
+
+
#ifdef JOE_DEBUG
static void record_vp(vnode_t vp, int count) {
struct uthread *ut;
OSAddAtomic(-1, &mp->mnt_numtriggers);
}
-/*
- * Pathname operations that don't trigger a mount for trigger vnodes
- */
-static const u_int64_t ignorable_pathops_mask =
- 1LL << OP_MOUNT |
- 1LL << OP_UNMOUNT |
- 1LL << OP_STATFS |
- 1LL << OP_ACCESS |
- 1LL << OP_GETATTR |
- 1LL << OP_LISTXATTR;
-
-int
-vfs_istraditionaltrigger(enum path_operation op, const struct componentname *cnp)
-{
- if (cnp->cn_flags & ISLASTCN)
- return ((1LL << op) & ignorable_pathops_mask) == 0;
- else
- return (1);
-}
-
__private_extern__
void
vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)